From 0de886e1c7d3a30eca452a8192a722d3ff9f6228 Mon Sep 17 00:00:00 2001 From: Tamal Saha Date: Mon, 3 Sep 2018 12:02:55 -0700 Subject: [PATCH] Use IntHash as status.observedGeneration (#1231) Signed-off-by: Tamal Saha --- api/crds/certificate.yaml | 8 +- api/crds/ingress.yaml | 8 +- api/openapi-spec/swagger.json | 16 +- apis/voyager/v1beta1/certificate.go | 6 +- apis/voyager/v1beta1/ingress.go | 6 +- apis/voyager/v1beta1/openapi_generated.go | 36 +- apis/voyager/v1beta1/zz_generated.deepcopy.go | 16 + docs/reference/voyager_run.md | 14 +- glide.lock | 21 +- hack/codegen.sh | 2 +- .../go/encoding/json/types/array_or_int.go | 55 + .../go/encoding/json/types/array_or_string.go | 57 + .../go/encoding/json/types/bool_yo.go | 33 + .../appscode/go/encoding/json/types/doc.go | 2 + .../go/encoding/json/types/int_hash.go | 180 ++ .../go/encoding/json/types/str_to_bool.go | 44 + .../appscode/go/encoding/json/types/str_yo.go | 43 + .../appscode/go/encoding/json/types/urlamp.go | 105 + .../appscode/go/encoding/json/types/urlset.go | 97 + vendor/github.com/appscode/go/log/log.go | 20 +- vendor/github.com/appscode/kutil/meta/cmp.go | 8 +- vendor/github.com/appscode/kutil/meta/hash.go | 22 +- vendor/github.com/appscode/mergo/map.go | 52 +- vendor/github.com/appscode/mergo/merge.go | 159 +- vendor/github.com/appscode/mergo/mergo.go | 9 +- .../github.com/cpuguy83/go-md2man/md2man.go | 31 +- .../cpuguy83/go-md2man/md2man/md2man.go | 1 + .../cpuguy83/go-md2man/md2man/roff.go | 98 +- .../sergi/go-diff/{LICENSE => LICENSE.txt} | 2 +- .../sergi/go-diff/diffmatchpatch/diff.go | 1339 ---------- .../go-diff/diffmatchpatch/diffmatchpatch.go | 46 - .../sergi/go-diff/diffmatchpatch/dmp.go | 2207 +++++++++++++++++ .../sergi/go-diff/diffmatchpatch/match.go | 160 -- .../sergi/go-diff/diffmatchpatch/mathutil.go | 23 - .../sergi/go-diff/diffmatchpatch/patch.go | 556 ----- .../sergi/go-diff/diffmatchpatch/stack.go | 66 + .../go-diff/diffmatchpatch/stringutil.go | 88 - 
vendor/github.com/yudai/gojsondiff/LICENSE | 2 +- vendor/github.com/yudai/gojsondiff/deltas.go | 2 +- .../yudai/gojsondiff/formatter/ascii.go | 8 +- vendor/gopkg.in/ini.v1/file.go | 15 +- vendor/gopkg.in/ini.v1/ini.go | 15 +- vendor/gopkg.in/ini.v1/parser.go | 99 +- 43 files changed, 3268 insertions(+), 2509 deletions(-) create mode 100644 vendor/github.com/appscode/go/encoding/json/types/array_or_int.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/array_or_string.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/bool_yo.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/doc.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/int_hash.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/str_to_bool.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/str_yo.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/urlamp.go create mode 100644 vendor/github.com/appscode/go/encoding/json/types/urlset.go rename vendor/github.com/sergi/go-diff/{LICENSE => LICENSE.txt} (93%) delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/dmp.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stack.go delete mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go diff --git a/api/crds/certificate.yaml b/api/crds/certificate.yaml index a5c1c8794..a398a9036 100644 --- a/api/crds/certificate.yaml +++ b/api/crds/certificate.yaml @@ -437,12 +437,8 @@ spec: - certURL - certStableURL observedGeneration: - description: 
observedGeneration is the most recent generation observed - for this resource. It corresponds to the resource's generation, which - is updated on mutation by the API Server. - format: int64 - type: integer - observedGenerationHash: + description: IntHash represents as int64 Generation and string Hash. + It is json serialized into $. type: string version: v1beta1 versions: diff --git a/api/crds/ingress.yaml b/api/crds/ingress.yaml index 1f40b8a02..0f477fcdb 100644 --- a/api/crds/ingress.yaml +++ b/api/crds/ingress.yaml @@ -1436,12 +1436,8 @@ spec: type: string type: array observedGeneration: - description: observedGeneration is the most recent generation observed - for this resource. It corresponds to the resource's generation, which - is updated on mutation by the API Server. - format: int64 - type: integer - observedGenerationHash: + description: IntHash represents as int64 Generation and string Hash. + It is json serialized into $. type: string version: v1beta1 versions: diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index c84d41e4b..7257ddec2 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1900,6 +1900,10 @@ } }, "definitions": { + "com.github.appscode.go.encoding.json.types.IntHash": { + "description": "IntHash represents as int64 Generation and string Hash. It is json serialized into \u003cint64\u003e$\u003chash_string\u003e.", + "type": "string" + }, "com.github.appscode.voyager.apis.voyager.v1beta1.AuthOption": { "properties": { "basic": { @@ -2079,11 +2083,7 @@ }, "observedGeneration": { "description": "observedGeneration is the most recent generation observed for this resource. 
It corresponds to the resource's generation, which is updated on mutation by the API Server.", - "type": "integer", - "format": "int64" - }, - "observedGenerationHash": { - "type": "string" + "$ref": "#/definitions/com.github.appscode.go.encoding.json.types.IntHash" } } }, @@ -2449,11 +2449,7 @@ }, "observedGeneration": { "description": "observedGeneration is the most recent generation observed for this resource. It corresponds to the resource's generation, which is updated on mutation by the API Server.", - "type": "integer", - "format": "int64" - }, - "observedGenerationHash": { - "type": "string" + "$ref": "#/definitions/com.github.appscode.go.encoding.json.types.IntHash" } } }, diff --git a/apis/voyager/v1beta1/certificate.go b/apis/voyager/v1beta1/certificate.go index e491bb2fa..6cedb657d 100644 --- a/apis/voyager/v1beta1/certificate.go +++ b/apis/voyager/v1beta1/certificate.go @@ -1,6 +1,7 @@ package v1beta1 import ( + "github.com/appscode/go/encoding/json/types" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -80,10 +81,7 @@ type CertificateStatus struct { // observedGeneration is the most recent generation observed for this resource. It corresponds to the // resource's generation, which is updated on mutation by the API Server. 
// +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // +optional - ObservedGenerationHash string `json:"observedGenerationHash,omitempty"` + ObservedGeneration *types.IntHash `json:"observedGeneration,omitempty"` CreationTime *metav1.Time `json:"creationTime,omitempty"` Conditions []CertificateCondition `json:"conditions,omitempty"` diff --git a/apis/voyager/v1beta1/ingress.go b/apis/voyager/v1beta1/ingress.go index 8c6eacc8a..4deb307f3 100644 --- a/apis/voyager/v1beta1/ingress.go +++ b/apis/voyager/v1beta1/ingress.go @@ -1,6 +1,7 @@ package v1beta1 import ( + "github.com/appscode/go/encoding/json/types" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -163,10 +164,7 @@ type IngressStatus struct { // observedGeneration is the most recent generation observed for this resource. It corresponds to the // resource's generation, which is updated on mutation by the API Server. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // +optional - ObservedGenerationHash string `json:"observedGenerationHash,omitempty"` + ObservedGeneration *types.IntHash `json:"observedGeneration,omitempty"` // LoadBalancer contains the current status of the load-balancer. LoadBalancer core.LoadBalancerStatus `json:"loadBalancer,omitempty"` diff --git a/apis/voyager/v1beta1/openapi_generated.go b/apis/voyager/v1beta1/openapi_generated.go index a886bbdaf..940b73206 100644 --- a/apis/voyager/v1beta1/openapi_generated.go +++ b/apis/voyager/v1beta1/openapi_generated.go @@ -23,6 +23,7 @@ limitations under the License. 
package v1beta1 import ( + types "github.com/appscode/go/encoding/json/types" spec "github.com/go-openapi/spec" resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +33,7 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ + "github.com/appscode/go/encoding/json/types.IntHash": schema_go_encoding_json_types_IntHash(ref), "github.com/appscode/voyager/apis/voyager/v1beta1.ACMECertificateDetails": schema_voyager_apis_voyager_v1beta1_ACMECertificateDetails(ref), "github.com/appscode/voyager/apis/voyager/v1beta1.AuthOption": schema_voyager_apis_voyager_v1beta1_AuthOption(ref), "github.com/appscode/voyager/apis/voyager/v1beta1.BasicAuth": schema_voyager_apis_voyager_v1beta1_BasicAuth(ref), @@ -306,6 +308,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA } } +func schema_go_encoding_json_types_IntHash(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "IntHash represents as int64 Generation and string Hash. It is json serialized into $.", + Type: types.IntHash{}.OpenAPISchemaType(), + Format: types.IntHash{}.OpenAPISchemaFormat(), + }, + }, + } +} + func schema_voyager_apis_voyager_v1beta1_ACMECertificateDetails(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -640,14 +654,7 @@ func schema_voyager_apis_voyager_v1beta1_CertificateStatus(ref common.ReferenceC "observedGeneration": { SchemaProps: spec.SchemaProps{ Description: "observedGeneration is the most recent generation observed for this resource. 
It corresponds to the resource's generation, which is updated on mutation by the API Server.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "observedGenerationHash": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Ref: ref("github.com/appscode/go/encoding/json/types.IntHash"), }, }, "creationTime": { @@ -676,7 +683,7 @@ func schema_voyager_apis_voyager_v1beta1_CertificateStatus(ref common.ReferenceC }, }, Dependencies: []string{ - "github.com/appscode/voyager/apis/voyager/v1beta1.CertificateCondition", "github.com/appscode/voyager/apis/voyager/v1beta1.CertificateDetails", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/appscode/go/encoding/json/types.IntHash", "github.com/appscode/voyager/apis/voyager/v1beta1.CertificateCondition", "github.com/appscode/voyager/apis/voyager/v1beta1.CertificateDetails", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -1478,14 +1485,7 @@ func schema_voyager_apis_voyager_v1beta1_IngressStatus(ref common.ReferenceCallb "observedGeneration": { SchemaProps: spec.SchemaProps{ Description: "observedGeneration is the most recent generation observed for this resource. 
It corresponds to the resource's generation, which is updated on mutation by the API Server.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "observedGenerationHash": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Ref: ref("github.com/appscode/go/encoding/json/types.IntHash"), }, }, "loadBalancer": { @@ -1498,7 +1498,7 @@ func schema_voyager_apis_voyager_v1beta1_IngressStatus(ref common.ReferenceCallb }, }, Dependencies: []string{ - "k8s.io/api/core/v1.LoadBalancerStatus"}, + "github.com/appscode/go/encoding/json/types.IntHash", "k8s.io/api/core/v1.LoadBalancerStatus"}, } } diff --git a/apis/voyager/v1beta1/zz_generated.deepcopy.go b/apis/voyager/v1beta1/zz_generated.deepcopy.go index 3fd16a25d..14803bf5a 100644 --- a/apis/voyager/v1beta1/zz_generated.deepcopy.go +++ b/apis/voyager/v1beta1/zz_generated.deepcopy.go @@ -220,6 +220,14 @@ func (in *CertificateSpec) DeepCopy() *CertificateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertificateStatus) DeepCopyInto(out *CertificateStatus) { *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } if in.CreationTime != nil { in, out := &in.CreationTime, &out.CreationTime if *in == nil { @@ -759,6 +767,14 @@ func (in *IngressSpec) DeepCopy() *IngressSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = (*in).DeepCopy() + } + } in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) return } diff --git a/docs/reference/voyager_run.md b/docs/reference/voyager_run.md index eaff6d89d..1149f8a92 100644 --- a/docs/reference/voyager_run.md +++ b/docs/reference/voyager_run.md @@ -36,6 +36,10 @@ voyager run [flags] --audit-log-maxsize int The maximum size in megabytes of the audit log file before it gets rotated. --audit-log-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking. (default "blocking") --audit-log-path string If set, all requests coming to the apiserver will be logged to this file. '-' means standard out. + --audit-log-truncate-enabled Whether event and batch truncating is enabled. + --audit-log-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-log-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, andif this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-log-version string API group and version used for serializing audit events written to log. (default "audit.k8s.io/v1beta1") --audit-policy-file string Path to the file that defines the audit policy configuration. Requires the 'AdvancedAuditing' feature gate. With AdvancedAuditing, a profile is required to enable auditing. 
--audit-webhook-batch-buffer-size int The size of the buffer to store events before batching and writing. Only used in batch mode. (default 10000) --audit-webhook-batch-max-size int The maximum size of a batch. Only used in batch mode. (default 400) @@ -46,6 +50,10 @@ voyager run [flags] --audit-webhook-config-file string Path to a kubeconfig formatted file that defines the audit webhook configuration. Requires the 'AdvancedAuditing' feature gate. --audit-webhook-initial-backoff duration The amount of time to wait before retrying the first failed request. (default 10s) --audit-webhook-mode string Strategy for sending audit events. Blocking indicates sending events should block server responses. Batch causes the backend to buffer and write events asynchronously. Known modes are batch,blocking. (default "batch") + --audit-webhook-truncate-enabled Whether event and batch truncating is enabled. + --audit-webhook-truncate-max-batch-size int Maximum size of the batch sent to the underlying backend. Actual serialized size can be several hundreds of bytes greater. If a batch exceeds this limit, it is split into several batches of smaller size. (default 10485760) + --audit-webhook-truncate-max-event-size int Maximum size of the audit event sent to the underlying backend. If the size of an event is greater than this number, first request and response are removed, andif this doesn't reduce the size enough, event is discarded. (default 102400) + --audit-webhook-version string API group and version used for serializing audit events written to webhook. (default "audit.k8s.io/v1beta1") --authentication-kubeconfig string kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenaccessreviews.authentication.k8s.io. --authentication-skip-lookup If false, the authentication-kubeconfig will be used to lookup missing authentication configuration from the cluster. 
--authentication-token-webhook-cache-ttl duration The duration to cache responses from the webhook token authenticator. (default 10s) @@ -78,7 +86,7 @@ voyager run [flags] --qps float The maximum QPS to the master from this client (default 1e+06) --rbac Enable RBAC for operator & offshoot Kubernetes objects --requestheader-allowed-names strings List of client certificate common names to allow to provide usernames in headers specified by --requestheader-username-headers. If empty, any client certificate validated by the authorities in --requestheader-client-ca-file is allowed. - --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers + --requestheader-client-ca-file string Root certificate bundle to use to verify client certificates on incoming requests before trusting usernames in headers specified by --requestheader-username-headers. WARNING: generally do not depend on authorization being already done for incoming requests. --requestheader-extra-headers-prefix strings List of request header prefixes to inspect. X-Remote-Extra- is suggested. (default [x-remote-extra-]) --requestheader-group-headers strings List of request headers to inspect for groups. X-Remote-Group is suggested. (default [x-remote-group]) --requestheader-username-headers strings List of request headers to inspect for usernames. X-Remote-User is common. (default [x-remote-user]) @@ -86,8 +94,8 @@ voyager run [flags] --resync-period duration If non-zero, will re-list this often. Otherwise, re-list will be delayed aslong as possible (until the upstream source closes the watch or times out. (default 10m0s) --secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 443) --tls-cert-file string File containing the default x509 Certificate for HTTPS. 
(CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir. - --tls-cipher-suites strings Comma-separated list of cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). If omitted, the default Go cipher suites will be used - --tls-min-version string Minimum TLS version supported. Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants. + --tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be use. Possible values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_RC4_128_SHA,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_RC4_128_SHA + --tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12 --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. --tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. 
If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default []) --validate-haproxy-config If true, validates generated haproxy.cfg before sending to HAProxy pods. (default true) diff --git a/glide.lock b/glide.lock index f9f6c7410..42244f22f 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 203c4be40b193eb284ec82c6e072e22dce8d8b391fca25a9449cc5c03a1104dd -updated: 2018-08-27T22:15:15.40717666-07:00 +updated: 2018-09-03T11:49:41.63909926-07:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -16,11 +16,12 @@ imports: - edgegrid - jsonhooks-v1 - name: github.com/appscode/go - version: df3c57fca2bef0e5b7fd085507deb20f18c790df + version: 0499c350ad263174dc8c1e3006b2d3300ff398e6 subpackages: - analytics - context - crypto/rand + - encoding/json/types - flags - homedir - ioutil @@ -43,7 +44,7 @@ imports: - client/workload/v1 - registry/admissionreview/v1beta1 - name: github.com/appscode/kutil - version: ab96b8d0056d39ca0c57dc6705864423b50515bc + version: c0e187ec9d028e1ad6bdab9dfe2612838a02bf50 subpackages: - apiextensions/v1beta1 - core/v1 @@ -57,7 +58,7 @@ imports: - tools/clientcmd - tools/queue - name: github.com/appscode/mergo - version: e3000cb3d28c72b837601cac94debd91032d19fe + version: 9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4 - name: github.com/appscode/ocutil version: 9e3ab89e810deaabe1fd353d01913aaf7dbe6e6d - name: github.com/appscode/pat @@ -206,7 +207,7 @@ imports: subpackages: - pkg/client/monitoring/v1 - name: github.com/cpuguy83/go-md2man - version: 71acacd42f85e5e82f70a55327789582a5200a90 + version: 20f5889cbdc3c73dbd2862796665e7c465ade7d1 subpackages: - md2man - name: github.com/davecgh/go-spew @@ -308,7 +309,7 
@@ imports: - name: github.com/hashicorp/errwrap version: 8a6fb523712970c966eefc6b39ed2c5e74880354 - name: github.com/hashicorp/go-cleanhttp - version: d5fe4b57a186c716b0e00b8c301cbd9b4182694d + version: e8ab9daed8d1ddd2d3c4efba338fe2eeae2e4f18 - name: github.com/hashicorp/go-multierror version: 886a7fbe3eb1c874d46f623bfa70af45f425b3d1 - name: github.com/hashicorp/go-retryablehttp @@ -482,8 +483,7 @@ imports: - name: github.com/ryanuber/go-glob version: 256dc444b735e061061cf46c809487313d5b0065 - name: github.com/sergi/go-diff - version: 24e2351369ec4949b2ed0dc5c477afdd4c4034e8 - repo: https://github.com/sergi/go-diff + version: 97b2266dfe4bd4ea1b81a463322f04f8b724801e subpackages: - diffmatchpatch - name: github.com/shurcooL/sanitized_anchor_name @@ -539,12 +539,11 @@ imports: - providers/dns/route53 - providers/dns/vultr - name: github.com/yudai/gojsondiff - version: 0525c875b75ca60b9e67ddc44496aa16f21066b0 + version: 7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6 subpackages: - formatter - name: github.com/yudai/golcs version: d1c525dea8ce39ea9a783d33cf08932305373f2c - repo: https://github.com/yudai/golcs - name: golang.org/x/crypto version: 49796115aa4b964c318aad4f3084fdb41e9aa067 subpackages: @@ -673,7 +672,7 @@ imports: - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/ini.v1 - version: 06f5f3d67269ccec1fe5fe4134ba6e982984f7f5 + version: 32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a - name: gopkg.in/natefinch/lumberjack.v2 version: 20b71e5b60d756d3d2f80def009790325acc2b23 - name: gopkg.in/square/go-jose.v2 diff --git a/hack/codegen.sh b/hack/codegen.sh index 3e4c7c780..6c472207b 100755 --- a/hack/codegen.sh +++ b/hack/codegen.sh @@ -28,7 +28,7 @@ docker run --rm -ti -u $(id -u):$(id -g) \ appscode/gengo:release-1.11 openapi-gen \ --v 1 --logtostderr \ --go-header-file "hack/gengo/boilerplate.go.txt" \ - --input-dirs 
"$PACKAGE_NAME/apis/voyager/v1beta1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/util/intstr,k8s.io/apimachinery/pkg/version,k8s.io/api/core/v1" \ + --input-dirs "$PACKAGE_NAME/apis/voyager/v1beta1,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/util/intstr,k8s.io/apimachinery/pkg/version,k8s.io/api/core/v1,github.com/appscode/go/encoding/json/types" \ --output-package "$PACKAGE_NAME/apis/voyager/v1beta1" # Generate crds.yaml and swagger.json diff --git a/vendor/github.com/appscode/go/encoding/json/types/array_or_int.go b/vendor/github.com/appscode/go/encoding/json/types/array_or_int.go new file mode 100644 index 000000000..9494ff1db --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/array_or_int.go @@ -0,0 +1,55 @@ +package types + +import ( + "bytes" + "encoding/json" + "errors" + "strconv" +) + +/* + GO => Json + [] => `[]` + [1] => `1` +[1, 2] => `[1,2]` +*/ +type ArrayOrInt []int + +func (m *ArrayOrInt) MarshalJSON() ([]byte, error) { + a := *m + n := len(a) + var buf bytes.Buffer + if n == 1 { + buf.WriteString(strconv.Itoa(a[0])) + } else { + buf.WriteString(`[`) + + for i := 0; i < n; i++ { + if i > 0 { + buf.WriteString(`,`) + } + buf.WriteString(strconv.Itoa(a[i])) + } + + buf.WriteString(`]`) + } + return buf.Bytes(), nil +} + +func (m *ArrayOrInt) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("jsontypes.ArrayOrInt: UnmarshalJSON on nil pointer") + } + var err error + if data[0] == '[' { + var a []int + err = json.Unmarshal(data, &a) + if err == nil { + *m = a + } + } else { + v, _ := strconv.Atoi(string(data)) + *m = append((*m)[0:0], v) + } + return err +} diff --git a/vendor/github.com/appscode/go/encoding/json/types/array_or_string.go b/vendor/github.com/appscode/go/encoding/json/types/array_or_string.go new file mode 100644 index 
000000000..10015f145 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/array_or_string.go @@ -0,0 +1,57 @@ +package types + +import ( + "bytes" + "encoding/json" + "errors" +) + +/* + GO => Json + [] => `[]` + ["a"] => `"a"` +["a", "b"] => `["a","b"]` +*/ +type ArrayOrString []string + +func (m *ArrayOrString) MarshalJSON() ([]byte, error) { + a := *m + n := len(a) + var buf bytes.Buffer + if n == 1 { + buf.WriteString(`"`) + buf.WriteString(a[0]) + buf.WriteString(`"`) + } else { + buf.WriteString(`[`) + + for i := 0; i < n; i++ { + if i > 0 { + buf.WriteString(`,`) + } + buf.WriteString(`"`) + buf.WriteString(a[i]) + buf.WriteString(`"`) + } + + buf.WriteString(`]`) + } + return buf.Bytes(), nil +} + +func (m *ArrayOrString) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("jsontypes.ArrayOrString: UnmarshalJSON on nil pointer") + } + var err error + if data[0] == '[' { + var a []string + err = json.Unmarshal(data, &a) + if err == nil { + *m = a + } + } else { + *m = append((*m)[0:0], string(data[1:len(data)-1])) + } + return err +} diff --git a/vendor/github.com/appscode/go/encoding/json/types/bool_yo.go b/vendor/github.com/appscode/go/encoding/json/types/bool_yo.go new file mode 100644 index 000000000..ea8395430 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/bool_yo.go @@ -0,0 +1,33 @@ +package types + +import ( + "errors" + "strconv" +) + +type BoolYo bool + +func (m *BoolYo) MarshalJSON() ([]byte, error) { + a := *m + if a { + return []byte(`"true"`), nil + } + return []byte(`"false"`), nil +} + +func (m *BoolYo) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("jsontypes.BoolYo: UnmarshalJSON on nil pointer") + } + + n := len(data) + var in string + if data[0] == '"' && data[n-1] == '"' { + in = string(data[1 : n-1]) + } else { + in = string(data) + } + v, err := strconv.ParseBool(in) + *m = BoolYo(v) + return err +} diff --git 
a/vendor/github.com/appscode/go/encoding/json/types/doc.go b/vendor/github.com/appscode/go/encoding/json/types/doc.go new file mode 100644 index 000000000..d3ca266e2 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/doc.go @@ -0,0 +1,2 @@ +// Package types provides a collection of Golang types with JSON marshaling support +package types diff --git a/vendor/github.com/appscode/go/encoding/json/types/int_hash.go b/vendor/github.com/appscode/go/encoding/json/types/int_hash.go new file mode 100644 index 000000000..022400344 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/int_hash.go @@ -0,0 +1,180 @@ +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/appscode/go/types" +) + +/* +IntHash represents as int64 Generation and string Hash. It is json serialized into $. +*/ +// +k8s:openapi-gen=true +type IntHash struct { + generation int64 + hash string +} + +func ParseIntHash(v interface{}) (*IntHash, error) { + switch m := v.(type) { + case nil: + return &IntHash{}, nil + case int: + return &IntHash{generation: int64(m)}, nil + case int64: + return &IntHash{generation: m}, nil + case *int64: + return &IntHash{generation: types.Int64(m)}, nil + case IntHash: + return &m, nil + case *IntHash: + return m, nil + case string: + return parseStringIntoIntHash(m) + case *string: + return parseStringIntoIntHash(types.String(m)) + default: + return nil, fmt.Errorf("failed to parse type %s into IntHash", reflect.TypeOf(v).String()) + } +} + +func parseStringIntoIntHash(s string) (*IntHash, error) { + if s == "" { + return &IntHash{}, nil + } + + idx := strings.IndexRune(s, '$') + switch { + case idx <= 0: + return nil, errors.New("missing generation") + case idx == len(s)-1: + return nil, errors.New("missing hash") + default: + i, err := strconv.ParseInt(s[:idx], 10, 64) + if err != nil { + return nil, err + } + h := s[idx+1:] + return &IntHash{generation: i, hash: h}, 
nil + } +} + +func NewIntHash(i int64, h string) *IntHash { return &IntHash{generation: i, hash: h} } + +func IntHashForGeneration(i int64) *IntHash { return &IntHash{generation: i} } + +func IntHashForHash(h string) *IntHash { return &IntHash{hash: h} } + +func (m IntHash) Generation() int64 { + return m.generation +} + +func (m IntHash) Hash() string { + return m.hash +} + +// IsZero returns true if the value is nil or time is zero. +func (m *IntHash) IsZero() bool { + if m == nil { + return true + } + return m.generation == 0 && m.hash == "" +} + +func (m *IntHash) Equal(u *IntHash) bool { + if m == nil { + return u == nil + } + if u == nil { // t != nil + return false + } + if m == u { + return true + } + if m.generation == u.generation { + return m.hash == u.hash + } + return false +} + +func (m *IntHash) DeepCopyInto(out *IntHash) { + *out = *m +} + +func (m *IntHash) DeepCopy() *IntHash { + if m == nil { + return nil + } + out := new(IntHash) + m.DeepCopyInto(out) + return out +} + +func (m IntHash) String() string { + return fmt.Sprintf(`%d$%s`, m.generation, m.hash) +} + +func (m *IntHash) MarshalJSON() ([]byte, error) { + if m == nil { + return nil, nil + } + if m.hash == "" { + return json.Marshal(m.generation) + } + return json.Marshal(m.String()) +} + +func (m *IntHash) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("jsontypes.IntHash: UnmarshalJSON on nil pointer") + } + + if data[0] == '"' { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + ih, err := ParseIntHash(s) + if err != nil { + return err + } + *m = *ih + return nil + } else if bytes.Equal(data, []byte("null")) { + return nil + } + + var i int64 + err := json.Unmarshal(data, &i) + if err != nil { + return err + } + m.generation = i + return nil +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ IntHash) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ IntHash) OpenAPISchemaFormat() string { return "" } + +// MarshalQueryParameter converts to a URL query parameter value +func (m IntHash) MarshalQueryParameter() (string, error) { + if m.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + return m.String(), nil +} diff --git a/vendor/github.com/appscode/go/encoding/json/types/str_to_bool.go b/vendor/github.com/appscode/go/encoding/json/types/str_to_bool.go new file mode 100644 index 000000000..b8fcc7169 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/str_to_bool.go @@ -0,0 +1,44 @@ +package types + +import ( + "errors" +) + +/* +StrToBool turns strings into bool when marshaled to Json. Empty strings are converted to false. Non-empty string, eg, +`"false"` will become True bool value. If already a json bool, then no change is made. + +This can be used to turn a string to bool if you have existing Json data. 
+*/ +type StrToBool bool + +func (m *StrToBool) MarshalJSON() ([]byte, error) { + a := *m + if a { + return []byte("true"), nil + } + return []byte("false"), nil +} + +func (m *StrToBool) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("jsontypes.StrToBool: UnmarshalJSON on nil pointer") + } + var err error + if data[0] == '"' { + // non empty string == true + *m = (len(data) - 2) > 0 + } else { + switch string(data) { + case "true": + *m = true + err = nil + case "false": + *m = false + err = nil + default: + err = errors.New("jsontypes.StrToBool: UnmarshalJSON failed for " + string(data)) + } + } + return err +} diff --git a/vendor/github.com/appscode/go/encoding/json/types/str_yo.go b/vendor/github.com/appscode/go/encoding/json/types/str_yo.go new file mode 100644 index 000000000..1dd083f12 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/str_yo.go @@ -0,0 +1,43 @@ +package types + +import ( + "bytes" + "encoding/json" + "errors" + "unicode/utf8" +) + +/* +StrYo turns non-strings into into a string by adding quotes around it into bool, +when marshaled to Json. If input is already string, no change is done. 
+*/ +type StrYo string + +func (m *StrYo) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("jsontypes.StrYo: UnmarshalJSON on nil pointer") + } + + if data[0] == '"' { + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + *m = StrYo(s) + return nil + } else if data[0] == '{' { + return errors.New("jsontypes.StrYo: Expected string, found object") + } else if data[0] == '[' { + return errors.New("jsontypes.StrYo: Expected string, found array") + } else if bytes.Equal(data, []byte("null")) { + *m = "" + return nil + } + d := string(data) + if utf8.ValidString(d) { + *m = StrYo(d) + return nil + } + return errors.New("jsontypes.StrYo: Found invalid utf8 byte array") +} diff --git a/vendor/github.com/appscode/go/encoding/json/types/urlamp.go b/vendor/github.com/appscode/go/encoding/json/types/urlamp.go new file mode 100644 index 000000000..49cf04d89 --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/urlamp.go @@ -0,0 +1,105 @@ +package types + +import ( + "bytes" + "errors" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" +) + +type URLMap struct { + Scheme string + Hosts map[string]string + Port int +} + +func NewURLMap(scheme string, port int) *URLMap { + return &URLMap{ + Scheme: scheme, + Hosts: map[string]string{}, + Port: port, + } +} + +func (um *URLMap) Insert(name, host string) { + um.Hosts[name] = host +} + +func (um *URLMap) Delete(hosts ...string) { + for _, host := range hosts { + delete(um.Hosts, host) + } +} + +func (um *URLMap) Has(host string) bool { + _, contained := um.Hosts[host] + return contained +} + +func (um URLMap) Equal(s2 URLMap) bool { + return um.Scheme == s2.Scheme && + um.Port == s2.Port && + reflect.DeepEqual(um.Hosts, s2.Hosts) +} + +func (um *URLMap) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + b.WriteRune('"') + if um != nil { + names := make([]string, 0, len(um.Hosts)) + for name := range um.Hosts { + names = append(names, name) + } + 
sort.Strings(names) + + for i, name := range names { + if i > 0 { + b.WriteRune(',') + } + b.WriteString(name) + b.WriteRune('=') + b.WriteString(um.Scheme) + b.WriteString("://") + b.WriteString(um.Hosts[name]) + b.WriteString(":") + b.WriteString(strconv.Itoa(um.Port)) + } + } + b.WriteRune('"') + return []byte(b.String()), nil +} + +func (um *URLMap) UnmarshalJSON(data []byte) error { + if um == nil { + return errors.New("jsontypes.URLMap: UnmarshalJSON on nil pointer") + } + + n := len(data) + if n < 2 { + return fmt.Errorf("jsontypes.URLMap: UnmarshalJSON on invalid data %s", string(data)) + } + if n == 2 && string(data) == `""` { + return nil + } + um.Hosts = map[string]string{} + + entries := strings.Split(string(data[1:n-1]), ",") + for _, entry := range entries { + parts := strings.Split(entry, "=") + if u, err := url.Parse(parts[1]); err == nil { + um.Scheme = u.Scheme + um.Hosts[parts[0]] = u.Hostname() + um.Port, err = strconv.Atoi(u.Port()) + if err != nil { + return err + } + } else { + return err + } + } + return nil +} diff --git a/vendor/github.com/appscode/go/encoding/json/types/urlset.go b/vendor/github.com/appscode/go/encoding/json/types/urlset.go new file mode 100644 index 000000000..eacaa896a --- /dev/null +++ b/vendor/github.com/appscode/go/encoding/json/types/urlset.go @@ -0,0 +1,97 @@ +package types + +import ( + "errors" + "fmt" + "net/url" + "sort" + "strconv" + "strings" + + "bytes" + + "github.com/appscode/go/sets" +) + +type URLSet struct { + Scheme string + Hosts sets.String + Port int +} + +func NewURLSet(scheme string, port int) *URLSet { + return &URLSet{ + Scheme: scheme, + Hosts: sets.NewString(), + Port: port, + } +} + +func (us *URLSet) Insert(hosts ...string) { + us.Hosts.Insert(hosts...) +} + +func (us *URLSet) Delete(hosts ...string) { + us.Hosts.Delete(hosts...) 
+} + +func (us *URLSet) Has(host string) bool { + return us.Hosts.Has(host) +} + +func (s1 URLSet) Equal(s2 URLSet) bool { + return s1.Scheme == s2.Scheme && + s1.Port == s2.Port && + s1.Hosts.Equal(s2.Hosts) +} + +func (us *URLSet) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + b.WriteRune('"') + if us != nil { + urls := us.Hosts.List() + sort.Strings(urls) + for i, h := range urls { + if i > 0 { + b.WriteRune(',') + } + b.WriteString(us.Scheme) + b.WriteString("://") + b.WriteString(h) + b.WriteString(":") + b.WriteString(strconv.Itoa(us.Port)) + } + } + b.WriteRune('"') + return []byte(b.String()), nil +} + +func (us *URLSet) UnmarshalJSON(data []byte) error { + if us == nil { + return errors.New("jsontypes.URLSet: UnmarshalJSON on nil pointer") + } + + n := len(data) + if n < 2 { + return fmt.Errorf("jsontypes.URLSet: UnmarshalJSON on invalid data %s", string(data)) + } + if n == 2 && string(data) == `""` { + return nil + } + us.Hosts = sets.NewString() + + urls := strings.Split(string(data[1:n-1]), ",") + for _, rawurl := range urls { + if u, err := url.Parse(rawurl); err == nil { + us.Scheme = u.Scheme + us.Hosts.Insert(u.Hostname()) + us.Port, err = strconv.Atoi(u.Port()) + if err != nil { + return err + } + } else { + return err + } + } + return nil +} diff --git a/vendor/github.com/appscode/go/log/log.go b/vendor/github.com/appscode/go/log/log.go index 71ee6d43c..ac6d1fd63 100644 --- a/vendor/github.com/appscode/go/log/log.go +++ b/vendor/github.com/appscode/go/log/log.go @@ -14,11 +14,11 @@ const ( ) func Fatal(args ...interface{}) { - glog.FatalDepth(1, args) + glog.FatalDepth(1, args...) } func Fatalln(args ...interface{}) { - glog.FatalDepth(1, args) + glog.FatalDepth(1, args...) } func Fatalf(format string, args ...interface{}) { @@ -26,11 +26,11 @@ func Fatalf(format string, args ...interface{}) { } func Error(args ...interface{}) { - glog.ErrorDepth(1, args) + glog.ErrorDepth(1, args...) 
} func Errorln(args ...interface{}) { - glog.ErrorDepth(1, args) + glog.ErrorDepth(1, args...) } func Errorf(format string, args ...interface{}) { @@ -38,11 +38,11 @@ func Errorf(format string, args ...interface{}) { } func Warning(args ...interface{}) { - glog.WarningDepth(1, args) + glog.WarningDepth(1, args...) } func Warningln(args ...interface{}) { - glog.WarningDepth(1, args) + glog.WarningDepth(1, args...) } func Warningf(format string, args ...interface{}) { @@ -50,11 +50,11 @@ func Warningf(format string, args ...interface{}) { } func Info(args ...interface{}) { - glog.InfoDepth(1, args) + glog.InfoDepth(1, args...) } func Infoln(args ...interface{}) { - glog.InfoDepth(1, args) + glog.InfoDepth(1, args...) } func Infof(format string, args ...interface{}) { @@ -63,13 +63,13 @@ func Infof(format string, args ...interface{}) { func Debug(args ...interface{}) { if glog.V(LevelDebug) { - glog.InfoDepth(1, args) + glog.InfoDepth(1, args...) } } func Debugln(args ...interface{}) { if glog.V(LevelDebug) { - glog.InfoDepth(1, args) + glog.InfoDepth(1, args...) } } diff --git a/vendor/github.com/appscode/kutil/meta/cmp.go b/vendor/github.com/appscode/kutil/meta/cmp.go index c0ce846e8..702e98d35 100644 --- a/vendor/github.com/appscode/kutil/meta/cmp.go +++ b/vendor/github.com/appscode/kutil/meta/cmp.go @@ -34,16 +34,16 @@ func Equal(x, y interface{}) bool { return cmp.Equal(x, y, cmpOptions...) 
} -const lastAppliedConfiguration = "kubectl.kubernetes.io/last-applied-configuration" +const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration" // EqualAnnotation checks equality of annotations skipping `kubectl.kubernetes.io/last-applied-configuration` key func EqualAnnotation(x, y map[string]string) bool { xLen := len(x) - if _, found := x[lastAppliedConfiguration]; found { + if _, found := x[LastAppliedConfigAnnotation]; found { xLen-- } yLen := len(y) - if _, found := y[lastAppliedConfiguration]; found { + if _, found := y[LastAppliedConfigAnnotation]; found { yLen-- } if xLen != yLen { @@ -51,7 +51,7 @@ func EqualAnnotation(x, y map[string]string) bool { } for k, v := range x { - if k == lastAppliedConfiguration { + if k == LastAppliedConfigAnnotation { continue } if y[k] != v { diff --git a/vendor/github.com/appscode/kutil/meta/hash.go b/vendor/github.com/appscode/kutil/meta/hash.go index 488ea1a7f..c5e6f5b6c 100644 --- a/vendor/github.com/appscode/kutil/meta/hash.go +++ b/vendor/github.com/appscode/kutil/meta/hash.go @@ -6,6 +6,7 @@ import ( "reflect" "strconv" + "github.com/appscode/go/encoding/json/types" "github.com/appscode/go/log" "github.com/davecgh/go-spew/spew" "github.com/fatih/structs" @@ -22,7 +23,7 @@ func GenerationHash(in metav1.Object) string { if len(in.GetAnnotations()) > 0 { data := make(map[string]string, len(in.GetAnnotations())) for k, v := range in.GetAnnotations() { - if k != lastAppliedConfiguration { + if k != LastAppliedConfigAnnotation { data[k] = v } } @@ -55,10 +56,12 @@ func AlreadyObserved(o interface{}, enableStatusSubresource bool) bool { obj := o.(metav1.Object) st := structs.New(o) - if st.Field("Status").Field("ObservedGeneration").Value().(int64) < obj.GetGeneration() { - return false + cur := types.NewIntHash(obj.GetGeneration(), GenerationHash(obj)) + observed, err := types.ParseIntHash(st.Field("Status").Field("ObservedGeneration").Value()) + if err != nil { + panic(err) + } - return 
GenerationHash(obj) == st.Field("Status").Field("ObservedGenerationHash").Value().(string) + return observed.Equal(cur) } func AlreadyObserved2(old, nu interface{}, enableStatusSubresource bool) bool { @@ -81,10 +84,15 @@ func AlreadyObserved2(old, nu interface{}, enableStatusSubresource bool) bool { var match bool if enableStatusSubresource { - match = nuStruct.Field("Status").Field("ObservedGeneration").Value().(int64) >= nuObj.GetGeneration() - if match { - match = GenerationHash(nuObj) == nuStruct.Field("Status").Field("ObservedGenerationHash").Value().(string) + oldObserved, err := types.ParseIntHash(oldStruct.Field("Status").Field("ObservedGeneration").Value()) + if err != nil { + panic(err) + } + nuObserved, err := types.ParseIntHash(nuStruct.Field("Status").Field("ObservedGeneration").Value()) + if err != nil { + panic(err) + } + match = nuObserved.Equal(oldObserved) } else { match = Equal(oldStruct.Field("Spec").Value(), nuStruct.Field("Spec").Value()) if match { diff --git a/vendor/github.com/appscode/mergo/map.go b/vendor/github.com/appscode/mergo/map.go index 8e8c4ba8e..6ea38e636 100644 --- a/vendor/github.com/appscode/mergo/map.go +++ b/vendor/github.com/appscode/mergo/map.go @@ -31,7 +31,8 @@ func isExported(field reflect.StructField) bool { // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. 
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr @@ -61,6 +62,13 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over dstMap[fieldName] = src.Field(i).Interface() } } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { @@ -85,21 +93,24 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over srcKind = reflect.Ptr } } + if !srcElement.IsValid() { continue } if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } - } else { - if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) } } } @@ -117,28 +128,35 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over // doesn't apply if dst is a map. 
// This is separated method from Merge because it is cleaner and it keeps sane // semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}) error { - return _map(dst, src, false) +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) } -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by // non-empty src attribute values. -func MapWithOverwrite(dst, src interface{}) error { - return _map(dst, src, true) +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) } -func _map(dst, src interface{}, overwrite bool) error { +func _map(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } // To be friction-less, we redirect equal-type arguments // to deepMerge. Only because arguments can be anything. 
if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } switch vSrc.Kind() { case reflect.Struct: @@ -152,5 +170,5 @@ func _map(dst, src interface{}, overwrite bool) error { default: return ErrNotSupported } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) } diff --git a/vendor/github.com/appscode/mergo/merge.go b/vendor/github.com/appscode/mergo/merge.go index 513774f4c..44f70a89d 100644 --- a/vendor/github.com/appscode/mergo/merge.go +++ b/vendor/github.com/appscode/mergo/merge.go @@ -9,13 +9,38 @@ package mergo import ( + "fmt" "reflect" ) +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasExportedField(dst.Field(i)) + } else { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if !src.IsValid() { return } @@ -32,14 +57,31 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov // Remember, remember... 
visited[h] = &visit{addr, typ, seen} } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + switch dst.Kind() { case reflect.Struct: - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { - return + if hasExportedField(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) } } case reflect.Map: + if dst.IsNil() && !src.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -47,7 +89,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov } dstElement := dst.MapIndex(key) switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { continue } @@ -62,28 +104,90 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov case reflect.Ptr: fallthrough case reflect.Map: - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { return } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = 
reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + dst.SetMapIndex(key, dstSlice) } } - if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + continue + } + + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + case reflect.Slice: + if !dst.CanSet() { + break + } + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } case reflect.Ptr: fallthrough case reflect.Interface: if src.IsNil() { break - } else if dst.IsNil() || overwrite { + } + if src.Kind() != reflect.Interface { + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err 
= deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } default: @@ -98,26 +202,51 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov // src attributes if they themselves are not empty. dst and src must be valid same-type structs // and dst must be a pointer to struct. // It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}) error { - return merge(dst, src, false) +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) } // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by // non-empty src attribute values. -func MergeWithOverwrite(dst, src interface{}) error { - return merge(dst, src, true) +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. 
+func WithOverride(config *Config) { + config.Overwrite = true } -func merge(dst, src interface{}, overwrite bool) error { +// WithAppendSlice will make merge append slices instead of overwriting it +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } if vDst.Type() != vSrc.Type() { return ErrDifferentArgumentsTypes } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } diff --git a/vendor/github.com/appscode/mergo/mergo.go b/vendor/github.com/appscode/mergo/mergo.go index f8a0991ec..a82fea2fd 100644 --- a/vendor/github.com/appscode/mergo/mergo.go +++ b/vendor/github.com/appscode/mergo/mergo.go @@ -32,7 +32,7 @@ type visit struct { next *visit } -// From src/pkg/encoding/json. +// From src/pkg/encoding/json/encode.go. 
func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: @@ -46,7 +46,14 @@ func isEmptyValue(v reflect.Value) bool { case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: return v.IsNil() + case reflect.Invalid: + return true } return false } diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man.go index 1dc70f47a..c35dd3352 100644 --- a/vendor/github.com/cpuguy83/go-md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/md2man.go @@ -9,18 +9,22 @@ import ( "github.com/cpuguy83/go-md2man/md2man" ) -var inFilePath = flag.String("in", "", "Path to file to be processed") -var outFilePath = flag.String("out", "", "Path to output processed file") +var inFilePath = flag.String("in", "", "Path to file to be processed (default: stdin)") +var outFilePath = flag.String("out", "", "Path to output processed file (default: stdout)") func main() { + var err error flag.Parse() - inFile, err := os.Open(*inFilePath) - if err != nil { - fmt.Println(err) - os.Exit(1) + inFile := os.Stdin + if *inFilePath != "" { + inFile, err = os.Open(*inFilePath) + if err != nil { + fmt.Println(err) + os.Exit(1) + } } - defer inFile.Close() + defer inFile.Close() // nolint: errcheck doc, err := ioutil.ReadAll(inFile) if err != nil { @@ -30,12 +34,15 @@ func main() { out := md2man.Render(doc) - outFile, err := os.Create(*outFilePath) - if err != nil { - fmt.Println(err) - os.Exit(1) + outFile := os.Stdout + if *outFilePath != "" { + outFile, err = os.Create(*outFilePath) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + defer outFile.Close() // nolint: errcheck } - defer outFile.Close() _, err = outFile.Write(out) if err != nil { fmt.Println(err) diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go 
b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go index 8f44fa155..af62279a6 100644 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go @@ -4,6 +4,7 @@ import ( "github.com/russross/blackfriday" ) +// Render converts a markdown document into a roff formatted document. func Render(doc []byte) []byte { renderer := RoffRenderer(0) extensions := 0 diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go index 4478786b7..8c29ec687 100644 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go @@ -9,8 +9,12 @@ import ( "github.com/russross/blackfriday" ) -type roffRenderer struct{} +type roffRenderer struct { + ListCounters []int +} +// RoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown func RoffRenderer(flags int) blackfriday.Renderer { return &roffRenderer{} } @@ -33,8 +37,12 @@ func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { line = append(line, []byte("\" ")...) 
out.Write(line) } + out.WriteString("\n") - out.WriteString(" \"\"\n") + // disable hyphenation + out.WriteString(".nh\n") + // disable justification (adjust text to left margin only) + out.WriteString(".ad l\n") } func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { @@ -49,7 +57,7 @@ func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { out.WriteString("\n.RE\n") } -func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { +func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint out.Write(text) } @@ -80,23 +88,25 @@ func (r *roffRenderer) HRule(out *bytes.Buffer) { func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { marker := out.Len() - out.WriteString(".IP ") - if flags&blackfriday.LIST_TYPE_ORDERED != 0 { - out.WriteString("\\(bu 2") - } else { - out.WriteString("\\n+[step" + string(flags) + "]") - } - out.WriteString("\n") + r.ListCounters = append(r.ListCounters, 1) + out.WriteString("\n.RS\n") if !text() { out.Truncate(marker) return } - + r.ListCounters = r.ListCounters[:len(r.ListCounters)-1] + out.WriteString("\n.RE\n") } func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { - out.WriteString("\n\\item ") + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1])) + r.ListCounters[len(r.ListCounters)-1]++ + } else { + out.WriteString(".IP \\(bu 2\n") + } out.Write(text) + out.WriteString("\n") } func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { @@ -111,11 +121,24 @@ func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { } } -// TODO: This might now work func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { - out.WriteString(".TS\nallbox;\n") - + out.WriteString("\n.TS\nallbox;\n") + + maxDelims := 0 + lines := strings.Split(strings.TrimRight(string(header), 
"\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") + for _, w := range lines { + curDelims := strings.Count(w, "\t") + if curDelims > maxDelims { + maxDelims = curDelims + } + } + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n")) + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n")) out.Write(header) + if len(header) > 0 { + out.Write([]byte("\n")) + } + out.Write(body) out.WriteString("\n.TE\n") } @@ -125,24 +148,30 @@ func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { out.WriteString("\n") } out.Write(text) - out.WriteString("\n") } func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { if out.Len() > 0 { - out.WriteString(" ") + out.WriteString("\t") } - out.Write(text) - out.WriteString(" ") + if len(text) == 0 { + text = []byte{' '} + } + out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) } -// TODO: This is probably broken func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { if out.Len() > 0 { out.WriteString("\t") } + if len(text) > 30 { + text = append([]byte("T{\n"), text...) + text = append(text, []byte("\nT}")...) 
+ } + if len(text) == 0 { + text = []byte{' '} + } out.Write(text) - out.WriteString("\t") } func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { @@ -185,10 +214,11 @@ func (r *roffRenderer) LineBreak(out *bytes.Buffer) { } func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + out.Write(content) r.AutoLink(out, link, 0) } -func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { +func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint out.Write(tag) } @@ -209,25 +239,6 @@ func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { out.WriteString(html.UnescapeString(string(entity))) } -func processFooterText(text []byte) []byte { - text = bytes.TrimPrefix(text, []byte("% ")) - newText := []byte{} - textArr := strings.Split(string(text), ") ") - - for i, w := range textArr { - if i == 0 { - w = strings.Replace(w, "(", "\" \"", 1) - w = fmt.Sprintf("\"%s\"", w) - } else { - w = fmt.Sprintf(" \"%s\"", w) - } - newText = append(newText, []byte(w)...) - } - newText = append(newText, []byte(" \"\"")...) - - return newText -} - func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { escapeSpecialChars(out, text) } @@ -249,6 +260,11 @@ func needsBackslash(c byte) bool { func escapeSpecialChars(out *bytes.Buffer, text []byte) { for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out.WriteString("\\&") + } + // directly copy normal characters org := i diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE.txt similarity index 93% rename from vendor/github.com/sergi/go-diff/LICENSE rename to vendor/github.com/sergi/go-diff/LICENSE.txt index 937942c2b..eeb91026a 100644 --- a/vendor/github.com/sergi/go-diff/LICENSE +++ b/vendor/github.com/sergi/go-diff/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2012-2016 The go-diff Authors. 
All rights reserved. +Copyright (c) 2012 Sergi Mansilla Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go deleted file mode 100644 index 59b885168..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ /dev/null @@ -1,1339 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "bytes" - "errors" - "fmt" - "html" - "math" - "net/url" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Operation defines the operation of a diff item. -type Operation int8 - -const ( - // DiffDelete item represents a delete diff. - DiffDelete Operation = -1 - // DiffInsert item represents an insert diff. - DiffInsert Operation = 1 - // DiffEqual item represents an equal diff. - DiffEqual Operation = 0 -) - -// Diff represents one diff operation -type Diff struct { - Type Operation - Text string -} - -func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { - return append(slice[:index], append(elements, slice[index+amount:]...)...) -} - -// DiffMain finds the differences between two texts. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { - return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) -} - -// DiffMainRunes finds the differences between two rune sequences. 
-// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { - var deadline time.Time - if dmp.DiffTimeout > 0 { - deadline = time.Now().Add(dmp.DiffTimeout) - } - return dmp.diffMainRunes(text1, text2, checklines, deadline) -} - -func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { - if runesEqual(text1, text2) { - var diffs []Diff - if len(text1) > 0 { - diffs = append(diffs, Diff{DiffEqual, string(text1)}) - } - return diffs - } - // Trim off common prefix (speedup). - commonlength := commonPrefixLength(text1, text2) - commonprefix := text1[:commonlength] - text1 = text1[commonlength:] - text2 = text2[commonlength:] - - // Trim off common suffix (speedup). - commonlength = commonSuffixLength(text1, text2) - commonsuffix := text1[len(text1)-commonlength:] - text1 = text1[:len(text1)-commonlength] - text2 = text2[:len(text2)-commonlength] - - // Compute the diff on the middle block. - diffs := dmp.diffCompute(text1, text2, checklines, deadline) - - // Restore the prefix and suffix. - if len(commonprefix) != 0 { - diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) - } - if len(commonsuffix) != 0 { - diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) - } - - return dmp.DiffCleanupMerge(diffs) -} - -// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. -func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { - diffs := []Diff{} - if len(text1) == 0 { - // Just add some text (speedup). - return append(diffs, Diff{DiffInsert, string(text2)}) - } else if len(text2) == 0 { - // Just delete some text (speedup). 
- return append(diffs, Diff{DiffDelete, string(text1)}) - } - - var longtext, shorttext []rune - if len(text1) > len(text2) { - longtext = text1 - shorttext = text2 - } else { - longtext = text2 - shorttext = text1 - } - - if i := runesIndex(longtext, shorttext); i != -1 { - op := DiffInsert - // Swap insertions for deletions if diff is reversed. - if len(text1) > len(text2) { - op = DiffDelete - } - // Shorter text is inside the longer text (speedup). - return []Diff{ - Diff{op, string(longtext[:i])}, - Diff{DiffEqual, string(shorttext)}, - Diff{op, string(longtext[i+len(shorttext):])}, - } - } else if len(shorttext) == 1 { - // Single character string. - // After the previous speedup, the character can't be an equality. - return []Diff{ - Diff{DiffDelete, string(text1)}, - Diff{DiffInsert, string(text2)}, - } - // Check to see if the problem can be split in two. - } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { - // A half-match was found, sort out the return data. - text1A := hm[0] - text1B := hm[1] - text2A := hm[2] - text2B := hm[3] - midCommon := hm[4] - // Send both pairs off for separate processing. - diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) - diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) - // Merge the results. - return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) - } else if checklines && len(text1) > 100 && len(text2) > 100 { - return dmp.diffLineMode(text1, text2, deadline) - } - return dmp.diffBisect(text1, text2, deadline) -} - -// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. -func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { - // Scan the text on a line-by-line basis first. 
- text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) - - diffs := dmp.diffMainRunes(text1, text2, false, deadline) - - // Convert the diff back to original text. - diffs = dmp.DiffCharsToLines(diffs, linearray) - // Eliminate freak matches (e.g. blank lines) - diffs = dmp.DiffCleanupSemantic(diffs) - - // Rediff any replacement blocks, this time character-by-character. - // Add a dummy entry at the end. - diffs = append(diffs, Diff{DiffEqual, ""}) - - pointer := 0 - countDelete := 0 - countInsert := 0 - - // NOTE: Rune slices are slower than using strings in this case. - textDelete := "" - textInsert := "" - - for pointer < len(diffs) { - switch diffs[pointer].Type { - case DiffInsert: - countInsert++ - textInsert += diffs[pointer].Text - case DiffDelete: - countDelete++ - textDelete += diffs[pointer].Text - case DiffEqual: - // Upon reaching an equality, check for prior redundancies. - if countDelete >= 1 && countInsert >= 1 { - // Delete the offending records and add the merged ones. - diffs = splice(diffs, pointer-countDelete-countInsert, - countDelete+countInsert) - - pointer = pointer - countDelete - countInsert - a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) - for j := len(a) - 1; j >= 0; j-- { - diffs = splice(diffs, pointer, 0, a[j]) - } - pointer = pointer + len(a) - } - - countInsert = 0 - countDelete = 0 - textDelete = "" - textInsert = "" - } - pointer++ - } - - return diffs[:len(diffs)-1] // Remove the dummy entry at the end. -} - -// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. -// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. -// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. -func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { - // Unused in this code, but retained for interface compatibility. 
- return dmp.diffBisect([]rune(text1), []rune(text2), deadline) -} - -// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. -// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. -func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { - // Cache the text lengths to prevent multiple calls. - runes1Len, runes2Len := len(runes1), len(runes2) - - maxD := (runes1Len + runes2Len + 1) / 2 - vOffset := maxD - vLength := 2 * maxD - - v1 := make([]int, vLength) - v2 := make([]int, vLength) - for i := range v1 { - v1[i] = -1 - v2[i] = -1 - } - v1[vOffset+1] = 0 - v2[vOffset+1] = 0 - - delta := runes1Len - runes2Len - // If the total number of characters is odd, then the front path will collide with the reverse path. - front := (delta%2 != 0) - // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. - k1start := 0 - k1end := 0 - k2start := 0 - k2end := 0 - for d := 0; d < maxD; d++ { - // Bail out if deadline is reached. - if !deadline.IsZero() && time.Now().After(deadline) { - break - } - - // Walk the front path one step. - for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { - k1Offset := vOffset + k1 - var x1 int - - if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { - x1 = v1[k1Offset+1] - } else { - x1 = v1[k1Offset-1] + 1 - } - - y1 := x1 - k1 - for x1 < runes1Len && y1 < runes2Len { - if runes1[x1] != runes2[y1] { - break - } - x1++ - y1++ - } - v1[k1Offset] = x1 - if x1 > runes1Len { - // Ran off the right of the graph. - k1end += 2 - } else if y1 > runes2Len { - // Ran off the bottom of the graph. - k1start += 2 - } else if front { - k2Offset := vOffset + delta - k1 - if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { - // Mirror x2 onto top-left coordinate system. - x2 := runes1Len - v2[k2Offset] - if x1 >= x2 { - // Overlap detected. 
- return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) - } - } - } - } - // Walk the reverse path one step. - for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { - k2Offset := vOffset + k2 - var x2 int - if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { - x2 = v2[k2Offset+1] - } else { - x2 = v2[k2Offset-1] + 1 - } - var y2 = x2 - k2 - for x2 < runes1Len && y2 < runes2Len { - if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { - break - } - x2++ - y2++ - } - v2[k2Offset] = x2 - if x2 > runes1Len { - // Ran off the left of the graph. - k2end += 2 - } else if y2 > runes2Len { - // Ran off the top of the graph. - k2start += 2 - } else if !front { - k1Offset := vOffset + delta - k2 - if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { - x1 := v1[k1Offset] - y1 := vOffset + x1 - k1Offset - // Mirror x2 onto top-left coordinate system. - x2 = runes1Len - x2 - if x1 >= x2 { - // Overlap detected. - return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) - } - } - } - } - } - // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. - return []Diff{ - Diff{DiffDelete, string(runes1)}, - Diff{DiffInsert, string(runes2)}, - } -} - -func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, - deadline time.Time) []Diff { - runes1a := runes1[:x] - runes2a := runes2[:y] - runes1b := runes1[x:] - runes2b := runes2[y:] - - // Compute both diffs serially. - diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) - diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) - - return append(diffs, diffsb...) -} - -// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. -// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. 
-func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { - chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) - return string(chars1), string(chars2), lineArray -} - -// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. -func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { - // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. - lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' - lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 - - chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) - chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) - - return chars1, chars2, lineArray -} - -func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { - return dmp.DiffLinesToRunes(string(text1), string(text2)) -} - -// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. -// We use strings instead of []runes as input mainly because you can't use []rune as a map key. -func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { - // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. 
- lineStart := 0 - lineEnd := -1 - runes := []rune{} - - for lineEnd < len(text)-1 { - lineEnd = indexOf(text, "\n", lineStart) - - if lineEnd == -1 { - lineEnd = len(text) - 1 - } - - line := text[lineStart : lineEnd+1] - lineStart = lineEnd + 1 - lineValue, ok := lineHash[line] - - if ok { - runes = append(runes, rune(lineValue)) - } else { - *lineArray = append(*lineArray, line) - lineHash[line] = len(*lineArray) - 1 - runes = append(runes, rune(len(*lineArray)-1)) - } - } - - return runes -} - -// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. -func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { - hydrated := make([]Diff, 0, len(diffs)) - for _, aDiff := range diffs { - chars := aDiff.Text - text := make([]string, len(chars)) - - for i, r := range chars { - text[i] = lineArray[r] - } - - aDiff.Text = strings.Join(text, "") - hydrated = append(hydrated, aDiff) - } - return hydrated -} - -// DiffCommonPrefix determines the common prefix length of two strings. -func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { - // Unused in this code, but retained for interface compatibility. - return commonPrefixLength([]rune(text1), []rune(text2)) -} - -// DiffCommonSuffix determines the common suffix length of two strings. -func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { - // Unused in this code, but retained for interface compatibility. - return commonSuffixLength([]rune(text1), []rune(text2)) -} - -// commonPrefixLength returns the length of the common prefix of two rune slices. -func commonPrefixLength(text1, text2 []rune) int { - short, long := text1, text2 - if len(short) > len(long) { - short, long = long, short - } - for i, r := range short { - if r != long[i] { - return i - } - } - return len(short) -} - -// commonSuffixLength returns the length of the common suffix of two rune slices. 
-func commonSuffixLength(text1, text2 []rune) int { - n := min(len(text1), len(text2)) - for i := 0; i < n; i++ { - if text1[len(text1)-i-1] != text2[len(text2)-i-1] { - return i - } - } - return n - - // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 - // Binary search. - // Performance analysis: http://neil.fraser.name/news/2007/10/09/ - /* - pointermin := 0 - pointermax := math.Min(len(text1), len(text2)) - pointermid := pointermax - pointerend := 0 - for pointermin < pointermid { - if text1[len(text1)-pointermid:len(text1)-pointerend] == - text2[len(text2)-pointermid:len(text2)-pointerend] { - pointermin = pointermid - pointerend = pointermin - } else { - pointermax = pointermid - } - pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) - } - return pointermid - */ -} - -// DiffCommonOverlap determines if the suffix of one string is the prefix of another. -func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { - // Cache the text lengths to prevent multiple calls. - text1Length := len(text1) - text2Length := len(text2) - // Eliminate the null case. - if text1Length == 0 || text2Length == 0 { - return 0 - } - // Truncate the longer string. - if text1Length > text2Length { - text1 = text1[text1Length-text2Length:] - } else if text1Length < text2Length { - text2 = text2[0:text1Length] - } - textLength := int(math.Min(float64(text1Length), float64(text2Length))) - // Quick check for the worst case. - if text1 == text2 { - return textLength - } - - // Start by looking for a single character match and increase length until no match is found. 
Performance analysis: http://neil.fraser.name/news/2010/11/04/ - best := 0 - length := 1 - for { - pattern := text1[textLength-length:] - found := strings.Index(text2, pattern) - if found == -1 { - break - } - length += found - if found == 0 || text1[textLength-length:] == text2[0:length] { - best = length - length++ - } - } - - return best -} - -// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. -func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { - // Unused in this code, but retained for interface compatibility. - runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) - if runeSlices == nil { - return nil - } - - result := make([]string, len(runeSlices)) - for i, r := range runeSlices { - result[i] = string(r) - } - return result -} - -func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { - if dmp.DiffTimeout <= 0 { - // Don't risk returning a non-optimal diff if we have unlimited time. - return nil - } - - var longtext, shorttext []rune - if len(text1) > len(text2) { - longtext = text1 - shorttext = text2 - } else { - longtext = text2 - shorttext = text1 - } - - if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { - return nil // Pointless. - } - - // First check if the second quarter is the seed for a half-match. - hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) - - // Check again based on the third quarter. - hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) - - hm := [][]rune{} - if hm1 == nil && hm2 == nil { - return nil - } else if hm2 == nil { - hm = hm1 - } else if hm1 == nil { - hm = hm2 - } else { - // Both matched. Select the longest. - if len(hm1[4]) > len(hm2[4]) { - hm = hm1 - } else { - hm = hm2 - } - } - - // A half-match was found, sort out the return data. 
- if len(text1) > len(text2) { - return hm - } - - return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} -} - -// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? -// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. -func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { - var bestCommonA []rune - var bestCommonB []rune - var bestCommonLen int - var bestLongtextA []rune - var bestLongtextB []rune - var bestShorttextA []rune - var bestShorttextB []rune - - // Start with a 1/4 length substring at position i as a seed. - seed := l[i : i+len(l)/4] - - for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { - prefixLength := commonPrefixLength(l[i:], s[j:]) - suffixLength := commonSuffixLength(l[:i], s[:j]) - - if bestCommonLen < suffixLength+prefixLength { - bestCommonA = s[j-suffixLength : j] - bestCommonB = s[j : j+prefixLength] - bestCommonLen = len(bestCommonA) + len(bestCommonB) - bestLongtextA = l[:i-suffixLength] - bestLongtextB = l[i+prefixLength:] - bestShorttextA = s[:j-suffixLength] - bestShorttextB = s[j+prefixLength:] - } - } - - if bestCommonLen*2 < len(l) { - return nil - } - - return [][]rune{ - bestLongtextA, - bestLongtextB, - bestShorttextA, - bestShorttextB, - append(bestCommonA, bestCommonB...), - } -} - -// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. -func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { - changes := false - // Stack of indices where equalities are found. - type equality struct { - data int - next *equality - } - var equalities *equality - - var lastequality string - // Always equal to diffs[equalities[equalitiesLength - 1]][1] - var pointer int // Index of current position. 
- // Number of characters that changed prior to the equality. - var lengthInsertions1, lengthDeletions1 int - // Number of characters that changed after the equality. - var lengthInsertions2, lengthDeletions2 int - - for pointer < len(diffs) { - if diffs[pointer].Type == DiffEqual { - // Equality found. - - equalities = &equality{ - data: pointer, - next: equalities, - } - lengthInsertions1 = lengthInsertions2 - lengthDeletions1 = lengthDeletions2 - lengthInsertions2 = 0 - lengthDeletions2 = 0 - lastequality = diffs[pointer].Text - } else { - // An insertion or deletion. - - if diffs[pointer].Type == DiffInsert { - lengthInsertions2 += len(diffs[pointer].Text) - } else { - lengthDeletions2 += len(diffs[pointer].Text) - } - // Eliminate an equality that is smaller or equal to the edits on both sides of it. - difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) - difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) - if len(lastequality) > 0 && - (len(lastequality) <= difference1) && - (len(lastequality) <= difference2) { - // Duplicate record. - insPoint := equalities.data - diffs = append( - diffs[:insPoint], - append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) - - // Change second copy to insert. - diffs[insPoint+1].Type = DiffInsert - // Throw away the equality we just deleted. - equalities = equalities.next - - if equalities != nil { - equalities = equalities.next - } - if equalities != nil { - pointer = equalities.data - } else { - pointer = -1 - } - - lengthInsertions1 = 0 // Reset the counters. - lengthDeletions1 = 0 - lengthInsertions2 = 0 - lengthDeletions2 = 0 - lastequality = "" - changes = true - } - } - pointer++ - } - - // Normalize the diff. - if changes { - diffs = dmp.DiffCleanupMerge(diffs) - } - diffs = dmp.DiffCleanupSemanticLossless(diffs) - // Find any overlaps between deletions and insertions. 
- // e.g: abcxxxxxxdef - // -> abcxxxdef - // e.g: xxxabcdefxxx - // -> defxxxabc - // Only extract an overlap if it is as big as the edit ahead or behind it. - pointer = 1 - for pointer < len(diffs) { - if diffs[pointer-1].Type == DiffDelete && - diffs[pointer].Type == DiffInsert { - deletion := diffs[pointer-1].Text - insertion := diffs[pointer].Text - overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) - overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) - if overlapLength1 >= overlapLength2 { - if float64(overlapLength1) >= float64(len(deletion))/2 || - float64(overlapLength1) >= float64(len(insertion))/2 { - - // Overlap found. Insert an equality and trim the surrounding edits. - diffs = append( - diffs[:pointer], - append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...) - - diffs[pointer-1].Text = - deletion[0 : len(deletion)-overlapLength1] - diffs[pointer+1].Text = insertion[overlapLength1:] - pointer++ - } - } else { - if float64(overlapLength2) >= float64(len(deletion))/2 || - float64(overlapLength2) >= float64(len(insertion))/2 { - // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. - overlap := Diff{DiffEqual, deletion[:overlapLength2]} - diffs = append( - diffs[:pointer], - append([]Diff{overlap}, diffs[pointer:]...)...) - - diffs[pointer-1].Type = DiffInsert - diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] - diffs[pointer+1].Type = DiffDelete - diffs[pointer+1].Text = deletion[overlapLength2:] - pointer++ - } - } - pointer++ - } - pointer++ - } - - return diffs -} - -// Define some regex patterns for matching boundaries. 
-var ( - nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) - whitespaceRegex = regexp.MustCompile(`\s`) - linebreakRegex = regexp.MustCompile(`[\r\n]`) - blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) - blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) -) - -// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. -// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. -func diffCleanupSemanticScore(one, two string) int { - if len(one) == 0 || len(two) == 0 { - // Edges are the best. - return 6 - } - - // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. - rune1, _ := utf8.DecodeLastRuneInString(one) - rune2, _ := utf8.DecodeRuneInString(two) - char1 := string(rune1) - char2 := string(rune2) - - nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) - nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) - whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) - whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) - lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) - lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) - blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) - blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) - - if blankLine1 || blankLine2 { - // Five points for blank lines. - return 5 - } else if lineBreak1 || lineBreak2 { - // Four points for line breaks. - return 4 - } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { - // Three points for end of sentences. - return 3 - } else if whitespace1 || whitespace2 { - // Two points for whitespace. 
- return 2 - } else if nonAlphaNumeric1 || nonAlphaNumeric2 { - // One point for non-alphanumeric. - return 1 - } - return 0 -} - -// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. -// E.g: The cat came. -> The cat came. -func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { - pointer := 1 - - // Intentionally ignore the first and last element (don't need checking). - for pointer < len(diffs)-1 { - if diffs[pointer-1].Type == DiffEqual && - diffs[pointer+1].Type == DiffEqual { - - // This is a single edit surrounded by equalities. - equality1 := diffs[pointer-1].Text - edit := diffs[pointer].Text - equality2 := diffs[pointer+1].Text - - // First, shift the edit as far left as possible. - commonOffset := dmp.DiffCommonSuffix(equality1, edit) - if commonOffset > 0 { - commonString := edit[len(edit)-commonOffset:] - equality1 = equality1[0 : len(equality1)-commonOffset] - edit = commonString + edit[:len(edit)-commonOffset] - equality2 = commonString + equality2 - } - - // Second, step character by character right, looking for the best fit. - bestEquality1 := equality1 - bestEdit := edit - bestEquality2 := equality2 - bestScore := diffCleanupSemanticScore(equality1, edit) + - diffCleanupSemanticScore(edit, equality2) - - for len(edit) != 0 && len(equality2) != 0 { - _, sz := utf8.DecodeRuneInString(edit) - if len(equality2) < sz || edit[:sz] != equality2[:sz] { - break - } - equality1 += edit[:sz] - edit = edit[sz:] + equality2[:sz] - equality2 = equality2[sz:] - score := diffCleanupSemanticScore(equality1, edit) + - diffCleanupSemanticScore(edit, equality2) - // The >= encourages trailing rather than leading whitespace on edits. 
- if score >= bestScore { - bestScore = score - bestEquality1 = equality1 - bestEdit = edit - bestEquality2 = equality2 - } - } - - if diffs[pointer-1].Text != bestEquality1 { - // We have an improvement, save it back to the diff. - if len(bestEquality1) != 0 { - diffs[pointer-1].Text = bestEquality1 - } else { - diffs = splice(diffs, pointer-1, 1) - pointer-- - } - - diffs[pointer].Text = bestEdit - if len(bestEquality2) != 0 { - diffs[pointer+1].Text = bestEquality2 - } else { - diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) - pointer-- - } - } - } - pointer++ - } - - return diffs -} - -// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. -func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { - changes := false - // Stack of indices where equalities are found. - type equality struct { - data int - next *equality - } - var equalities *equality - // Always equal to equalities[equalitiesLength-1][1] - lastequality := "" - pointer := 0 // Index of current position. - // Is there an insertion operation before the last equality. - preIns := false - // Is there a deletion operation before the last equality. - preDel := false - // Is there an insertion operation after the last equality. - postIns := false - // Is there a deletion operation after the last equality. - postDel := false - for pointer < len(diffs) { - if diffs[pointer].Type == DiffEqual { // Equality found. - if len(diffs[pointer].Text) < dmp.DiffEditCost && - (postIns || postDel) { - // Candidate found. - equalities = &equality{ - data: pointer, - next: equalities, - } - preIns = postIns - preDel = postDel - lastequality = diffs[pointer].Text - } else { - // Not a candidate, and can never become one. - equalities = nil - lastequality = "" - } - postIns = false - postDel = false - } else { // An insertion or deletion. 
- if diffs[pointer].Type == DiffDelete { - postDel = true - } else { - postIns = true - } - - // Five types to be split: - // ABXYCD - // AXCD - // ABXC - // AXCD - // ABXC - var sumPres int - if preIns { - sumPres++ - } - if preDel { - sumPres++ - } - if postIns { - sumPres++ - } - if postDel { - sumPres++ - } - if len(lastequality) > 0 && - ((preIns && preDel && postIns && postDel) || - ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { - - insPoint := equalities.data - - // Duplicate record. - diffs = append(diffs[:insPoint], - append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) - - // Change second copy to insert. - diffs[insPoint+1].Type = DiffInsert - // Throw away the equality we just deleted. - equalities = equalities.next - lastequality = "" - - if preIns && preDel { - // No changes made which could affect previous entry, keep going. - postIns = true - postDel = true - equalities = nil - } else { - if equalities != nil { - equalities = equalities.next - } - if equalities != nil { - pointer = equalities.data - } else { - pointer = -1 - } - postIns = false - postDel = false - } - changes = true - } - } - pointer++ - } - - if changes { - diffs = dmp.DiffCleanupMerge(diffs) - } - - return diffs -} - -// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. -// Any edit section can move as long as it doesn't cross an equality. -func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { - // Add a dummy entry at the end. - diffs = append(diffs, Diff{DiffEqual, ""}) - pointer := 0 - countDelete := 0 - countInsert := 0 - commonlength := 0 - textDelete := []rune(nil) - textInsert := []rune(nil) - - for pointer < len(diffs) { - switch diffs[pointer].Type { - case DiffInsert: - countInsert++ - textInsert = append(textInsert, []rune(diffs[pointer].Text)...) - pointer++ - break - case DiffDelete: - countDelete++ - textDelete = append(textDelete, []rune(diffs[pointer].Text)...) 
- pointer++ - break - case DiffEqual: - // Upon reaching an equality, check for prior redundancies. - if countDelete+countInsert > 1 { - if countDelete != 0 && countInsert != 0 { - // Factor out any common prefixies. - commonlength = commonPrefixLength(textInsert, textDelete) - if commonlength != 0 { - x := pointer - countDelete - countInsert - if x > 0 && diffs[x-1].Type == DiffEqual { - diffs[x-1].Text += string(textInsert[:commonlength]) - } else { - diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) - pointer++ - } - textInsert = textInsert[commonlength:] - textDelete = textDelete[commonlength:] - } - // Factor out any common suffixies. - commonlength = commonSuffixLength(textInsert, textDelete) - if commonlength != 0 { - insertIndex := len(textInsert) - commonlength - deleteIndex := len(textDelete) - commonlength - diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text - textInsert = textInsert[:insertIndex] - textDelete = textDelete[:deleteIndex] - } - } - // Delete the offending records and add the merged ones. - if countDelete == 0 { - diffs = splice(diffs, pointer-countInsert, - countDelete+countInsert, - Diff{DiffInsert, string(textInsert)}) - } else if countInsert == 0 { - diffs = splice(diffs, pointer-countDelete, - countDelete+countInsert, - Diff{DiffDelete, string(textDelete)}) - } else { - diffs = splice(diffs, pointer-countDelete-countInsert, - countDelete+countInsert, - Diff{DiffDelete, string(textDelete)}, - Diff{DiffInsert, string(textInsert)}) - } - - pointer = pointer - countDelete - countInsert + 1 - if countDelete != 0 { - pointer++ - } - if countInsert != 0 { - pointer++ - } - } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { - // Merge this equality with the previous one. - diffs[pointer-1].Text += diffs[pointer].Text - diffs = append(diffs[:pointer], diffs[pointer+1:]...) 
- } else { - pointer++ - } - countInsert = 0 - countDelete = 0 - textDelete = nil - textInsert = nil - break - } - } - - if len(diffs[len(diffs)-1].Text) == 0 { - diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. - } - - // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC - changes := false - pointer = 1 - // Intentionally ignore the first and last element (don't need checking). - for pointer < (len(diffs) - 1) { - if diffs[pointer-1].Type == DiffEqual && - diffs[pointer+1].Type == DiffEqual { - // This is a single edit surrounded by equalities. - if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { - // Shift the edit over the previous equality. - diffs[pointer].Text = diffs[pointer-1].Text + - diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] - diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text - diffs = splice(diffs, pointer-1, 1) - changes = true - } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { - // Shift the edit over the next equality. - diffs[pointer-1].Text += diffs[pointer+1].Text - diffs[pointer].Text = - diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text - diffs = splice(diffs, pointer+1, 1) - changes = true - } - } - pointer++ - } - - // If shifts were made, the diff needs reordering and another shift sweep. - if changes { - diffs = dmp.DiffCleanupMerge(diffs) - } - - return diffs -} - -// DiffXIndex returns the equivalent location in s2. -func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { - chars1 := 0 - chars2 := 0 - lastChars1 := 0 - lastChars2 := 0 - lastDiff := Diff{} - for i := 0; i < len(diffs); i++ { - aDiff := diffs[i] - if aDiff.Type != DiffInsert { - // Equality or deletion. - chars1 += len(aDiff.Text) - } - if aDiff.Type != DiffDelete { - // Equality or insertion. 
- chars2 += len(aDiff.Text) - } - if chars1 > loc { - // Overshot the location. - lastDiff = aDiff - break - } - lastChars1 = chars1 - lastChars2 = chars2 - } - if lastDiff.Type == DiffDelete { - // The location was deleted. - return lastChars2 - } - // Add the remaining character length. - return lastChars2 + (loc - lastChars1) -} - -// DiffPrettyHtml converts a []Diff into a pretty HTML report. -// It is intended as an example from which to write one's own display functions. -func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { - var buff bytes.Buffer - for _, diff := range diffs { - text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶
", -1) - switch diff.Type { - case DiffInsert: - _, _ = buff.WriteString("") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("") - case DiffDelete: - _, _ = buff.WriteString("") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("") - case DiffEqual: - _, _ = buff.WriteString("") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("") - } - } - return buff.String() -} - -// DiffPrettyText converts a []Diff into a colored text report. -func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { - var buff bytes.Buffer - for _, diff := range diffs { - text := diff.Text - - switch diff.Type { - case DiffInsert: - _, _ = buff.WriteString("\x1b[32m") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("\x1b[0m") - case DiffDelete: - _, _ = buff.WriteString("\x1b[31m") - _, _ = buff.WriteString(text) - _, _ = buff.WriteString("\x1b[0m") - case DiffEqual: - _, _ = buff.WriteString(text) - } - } - - return buff.String() -} - -// DiffText1 computes and returns the source text (all equalities and deletions). -func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { - //StringBuilder text = new StringBuilder() - var text bytes.Buffer - - for _, aDiff := range diffs { - if aDiff.Type != DiffInsert { - _, _ = text.WriteString(aDiff.Text) - } - } - return text.String() -} - -// DiffText2 computes and returns the destination text (all equalities and insertions). -func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { - var text bytes.Buffer - - for _, aDiff := range diffs { - if aDiff.Type != DiffDelete { - _, _ = text.WriteString(aDiff.Text) - } - } - return text.String() -} - -// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. 
-func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { - levenshtein := 0 - insertions := 0 - deletions := 0 - - for _, aDiff := range diffs { - switch aDiff.Type { - case DiffInsert: - insertions += len(aDiff.Text) - case DiffDelete: - deletions += len(aDiff.Text) - case DiffEqual: - // A deletion and an insertion is one substitution. - levenshtein += max(insertions, deletions) - insertions = 0 - deletions = 0 - } - } - - levenshtein += max(insertions, deletions) - return levenshtein -} - -// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. -// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. -func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { - var text bytes.Buffer - for _, aDiff := range diffs { - switch aDiff.Type { - case DiffInsert: - _, _ = text.WriteString("+") - _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) - _, _ = text.WriteString("\t") - break - case DiffDelete: - _, _ = text.WriteString("-") - _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) - _, _ = text.WriteString("\t") - break - case DiffEqual: - _, _ = text.WriteString("=") - _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) - _, _ = text.WriteString("\t") - break - } - } - delta := text.String() - if len(delta) != 0 { - // Strip off trailing tab character. - delta = delta[0 : utf8.RuneCountInString(delta)-1] - delta = unescaper.Replace(delta) - } - return delta -} - -// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. 
-func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { - i := 0 - - for _, token := range strings.Split(delta, "\t") { - if len(token) == 0 { - // Blank tokens are ok (from a trailing \t). - continue - } - - // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). - param := token[1:] - - switch op := token[0]; op { - case '+': - // Decode would Diff all "+" to " " - param = strings.Replace(param, "+", "%2b", -1) - param, err = url.QueryUnescape(param) - if err != nil { - return nil, err - } - if !utf8.ValidString(param) { - return nil, fmt.Errorf("invalid UTF-8 token: %q", param) - } - - diffs = append(diffs, Diff{DiffInsert, param}) - case '=', '-': - n, err := strconv.ParseInt(param, 10, 0) - if err != nil { - return nil, err - } else if n < 0 { - return nil, errors.New("Negative number in DiffFromDelta: " + param) - } - - // Remember that string slicing is by byte - we want by rune here. - text := string([]rune(text1)[i : i+int(n)]) - i += int(n) - - if op == '=' { - diffs = append(diffs, Diff{DiffEqual, text}) - } else { - diffs = append(diffs, Diff{DiffDelete, text}) - } - default: - // Anything else is an error. - return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) - } - } - - if i != len([]rune(text1)) { - return nil, fmt.Errorf("Delta length (%v) smaller than source text length (%v)", i, len(text1)) - } - - return diffs, nil -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go deleted file mode 100644 index d3acc32ce..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. 
-// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. -package diffmatchpatch - -import ( - "time" -) - -// DiffMatchPatch holds the configuration for diff-match-patch operations. -type DiffMatchPatch struct { - // Number of seconds to map a diff before giving up (0 for infinity). - DiffTimeout time.Duration - // Cost of an empty edit operation in terms of edit characters. - DiffEditCost int - // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). - MatchDistance int - // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. - PatchDeleteThreshold float64 - // Chunk size for context length. - PatchMargin int - // The number of bits in an int. - MatchMaxBits int - // At what point is no match declared (0.0 = perfection, 1.0 = very loose). - MatchThreshold float64 -} - -// New creates a new DiffMatchPatch object with default parameters. -func New() *DiffMatchPatch { - // Defaults. 
- return &DiffMatchPatch{ - DiffTimeout: time.Second, - DiffEditCost: 4, - MatchThreshold: 0.5, - MatchDistance: 1000, - PatchDeleteThreshold: 0.5, - PatchMargin: 4, - MatchMaxBits: 32, - } -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/dmp.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/dmp.go new file mode 100644 index 000000000..0ec5f516a --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/dmp.go @@ -0,0 +1,2207 @@ +/** + * dmp.go + * + * Go language implementation of Google Diff, Match, and Patch library + * + * Original library is Copyright (c) 2006 Google Inc. + * http://code.google.com/p/google-diff-match-patch/ + * + * Copyright (c) 2012 Sergi Mansilla + * https://github.com/sergi/go-diff + * + * See included LICENSE file for license details. + */ + +// Package diffmatchpatch offers robust algorithms to perform the +// operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// The data structure representing a diff is an array of tuples: +// [[DiffDelete, 'Hello'], [DiffInsert, 'Goodbye'], [DiffEqual, ' world.']] +// which means: delete 'Hello', add 'Goodbye' and keep ' world.' + +type Operation int8 + +const ( + DiffDelete Operation = -1 + DiffInsert Operation = 1 + DiffEqual Operation = 0 +) + +// unescaper unescapes selected chars for compatability with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the +// receiving application will certainly decode these fine. +// Note that this function is case-sensitive. Thus "%3F" would not be +// unescaped. But this is ok because it is only called with the output of +// HttpUtility.UrlEncode which returns lowercase hex. +// +// Example: "%3f" -> "?", "%24" -> "$", etc. 
+var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex_ = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex_ = regexp.MustCompile(`\s`) + linebreakRegex_ = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex_ = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex_ = regexp.MustCompile(`^\r?\n\r?\n`) +) + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + + if i == 0 { + return strings.Index(str, pattern) + } + + str1 := str[0:i] + str2 := str[i:] + + ind := strings.Index(str2, pattern) + if ind == -1 { + return -1 + } + + return ind + len(str1) + +} + +// Return the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return i + ind +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// The equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +// Patch represents one patch operation. 
+type Patch struct { + diffs []Diff + start1 int + start2 int + length1 int + length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indicies are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.length1 == 0 { + coords1 = strconv.Itoa(p.start1) + ",0" + } else if p.length1 == 1 { + coords1 = strconv.Itoa(p.start1 + 1) + } else { + coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1) + } + + if p.length2 == 0 { + coords2 = strconv.Itoa(p.start2) + ",0" + } else if p.length2 == 1 { + coords2 = strconv.Itoa(p.start2 + 1) + } else { + coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2) + } + + var text bytes.Buffer + text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. + for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + text.WriteString("+") + case DiffDelete: + text.WriteString("-") + case DiffEqual: + text.WriteString(" ") + } + + text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). + // A match this many characters away from the expected location will add + // 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do + // the contents have to be to match the expected contents. (0.0 = perfection, + // 1.0 = very loose). Note that Match_Threshold controls how closely the + // end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. 
+ PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} + +// DiffMain finds the differences between two texts. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout <= 0 { + deadline = time.Now().Add(24 * 365 * time.Hour) + } else { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMain(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMain(text1, text2 string, checklines bool, deadline time.Time) []Diff { + return dmp.diffMainRunes([]rune(text1), []rune(text2), checklines, deadline) +} + +// DiffMainRunes finds the differences between two rune sequences. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout <= 0 { + deadline = time.Now().Add(24 * 365 * time.Hour) + } else { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). 
+ commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not +// have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. 
+ } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1_a := hm[0] + text1_b := hm[1] + text2_a := hm[2] + text2_b := hm[3] + mid_common := hm[4] + // Send both pairs off for separate processing. + diffs_a := dmp.diffMainRunes(text1_a, text2_a, checklines, deadline) + diffs_b := dmp.diffMainRunes(text1_b, text2_b, checklines, deadline) + // Merge the results. + return append(diffs_a, append([]Diff{Diff{DiffEqual, string(mid_common)}}, diffs_b...)...) + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for +// greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + count_delete := 0 + count_insert := 0 + text_delete := "" + text_insert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + count_insert++ + text_insert += diffs[pointer].Text + case DiffDelete: + count_delete++ + text_delete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if count_delete >= 1 && count_insert >= 1 { + // Delete the offending records and add the merged ones. 
+ diffs = splice(diffs, pointer-count_delete-count_insert, + count_delete+count_insert) + + pointer = pointer - count_delete - count_insert + a := dmp.diffMain(text_delete, text_insert, false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + count_insert = 0 + count_delete = 0 + text_delete = "" + text_insert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two +// and return the recursively constructed diff. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two +// and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1_len, runes2_len := len(runes1), len(runes2) + + max_d := (runes1_len + runes2_len + 1) / 2 + v_offset := max_d + v_length := 2 * max_d + + v1 := make([]int, v_length) + v2 := make([]int, v_length) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[v_offset+1] = 0 + v2[v_offset+1] = 0 + + delta := runes1_len - runes2_len + // If the total number of characters is odd, then the front path will collide + // with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. + // Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < max_d; d++ { + // Bail out if deadline is reached. 
+ if time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1_offset := v_offset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1_offset-1] < v1[k1_offset+1]) { + x1 = v1[k1_offset+1] + } else { + x1 = v1[k1_offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1_len && y1 < runes2_len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1_offset] = x1 + if x1 > runes1_len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2_len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2_offset := v_offset + delta - k1 + if k2_offset >= 0 && k2_offset < v_length && v2[k2_offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1_len - v2[k2_offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit_(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2_offset := v_offset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2_offset-1] < v2[k2_offset+1]) { + x2 = v2[k2_offset+1] + } else { + x2 = v2[k2_offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1_len && y2 < runes2_len { + if runes1[runes1_len-x2-1] != runes2[runes2_len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2_offset] = x2 + if x2 > runes1_len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2_len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1_offset := v_offset + delta - k2 + if k1_offset >= 0 && k1_offset < v_length && v1[k1_offset] != -1 { + x1 := v1[k1_offset] + y1 := v_offset + x1 - k1_offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1_len - x2 + if x1 >= x2 { + // Overlap detected. 
+ return dmp.diffBisectSplit_(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or + // number of diffs equals number of characters, no commonality at all. + return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit_(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars split two texts into a list of strings. Reduces the texts to a string of +// hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. + // So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. 
lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings. Reduces the +// texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. +func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. + // text.split('\n') would would temporarily double our memory footprint. + // Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue_, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue_)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of +// text. 
+func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // Binary search. 
+ // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1_length := len(text1) + text2_length := len(text2) + // Eliminate the null case. + if text1_length == 0 || text2_length == 0 { + return 0 + } + // Truncate the longer string. + if text1_length > text2_length { + text1 = text1[text1_length-text2_length:] + } else if text1_length < text2_length { + text2 = text2[0:text1_length] + } + text_length := int(math.Min(float64(text1_length), float64(text2_length))) + // Quick check for the worst case. + if text1 == text2 { + return text_length + } + + // Start by looking for a single character match + // and increase length until no match is found. + // Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[text_length-length:] + found := strings.Index(text2, pattern) + if found == -1 { + return best + } + length += found + if found == 0 || text1[text_length-length:] == text2[0:length] { + best = length + length++ + } + } + return 0 +} + +// DiffHalfMatch checks whether the two texts share a substring which is at +// least half the length of the longer text. This speedup can produce non-minimal diffs. 
+func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } else { + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} + } + + return nil +} + +/** + * Does a substring of shorttext exist within longtext such that the substring + * is at least half the length of longtext? + * @param {string} longtext Longer string. + * @param {string} shorttext Shorter string. + * @param {number} i Start index of quarter length substring within longtext. 
+ * @return {Array.} Five element Array, containing the prefix of + * longtext, the suffix of longtext, the prefix of shorttext, the suffix + * of shorttext and the common middle. Or null if there was no match. + * @private + */ +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + j := -1 + best_common := []rune{} + best_longtext_a := []rune{} + best_longtext_b := []rune{} + best_shorttext_a := []rune{} + best_shorttext_b := []rune{} + + if j < len(s) { + j = runesIndexOf(s, seed, j+1) + for { + if j == -1 { + break + } + + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + if len(best_common) < suffixLength+prefixLength { + best_common = concat(s[j-suffixLength:j], s[j:j+prefixLength]) + best_longtext_a = l[:i-suffixLength] + best_longtext_b = l[i+prefixLength:] + best_shorttext_a = s[:j-suffixLength] + best_shorttext_b = s[j+prefixLength:] + } + j = runesIndexOf(s, seed, j+1) + } + } + + if len(best_common)*2 >= len(l) { + return [][]rune{ + best_longtext_a, + best_longtext_b, + best_shorttext_a, + best_shorttext_b, + best_common, + } + } + return nil +} + +func concat(r1, r2 []rune) []rune { + result := make([]rune, len(r1)+len(r2)) + copy(result, r1) + copy(result[len(r1):], r2) + return result +} + +// Diff_cleanupSemantic reduces the number of edits by eliminating +// semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + equalities := new(Stack) // Stack of indices where equalities are found. + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var length_insertions1, length_deletions1 int + // Number of characters that changed after the equality. 
+ var length_insertions2, length_deletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + equalities.Push(pointer) + length_insertions1 = length_insertions2 + length_deletions1 = length_deletions2 + length_insertions2 = 0 + length_deletions2 = 0 + lastequality = diffs[pointer].Text + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffInsert { + length_insertions2 += len(diffs[pointer].Text) + } else { + length_deletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both + // sides of it. + _difference1 := int(math.Max(float64(length_insertions1), float64(length_deletions1))) + _difference2 := int(math.Max(float64(length_insertions2), float64(length_deletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= _difference1) && + (len(lastequality) <= _difference2) { + // Duplicate record. + insPoint := equalities.Peek().(int) + diffs = append( + diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities.Pop() + + if equalities.Len() > 0 { + equalities.Pop() + pointer = equalities.Peek().(int) + } else { + pointer = -1 + } + + length_insertions1 = 0 // Reset the counters. + length_deletions1 = 0 + length_insertions2 = 0 + length_deletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. 
+ pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlap_length1 := dmp.DiffCommonOverlap(deletion, insertion) + overlap_length2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlap_length1 >= overlap_length2 { + if float64(overlap_length1) >= float64(len(deletion))/2 || + float64(overlap_length1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = append( + diffs[:pointer], + append([]Diff{Diff{DiffEqual, insertion[:overlap_length1]}}, diffs[pointer:]...)...) + //diffs.splice(pointer, 0, + // [DiffEqual, insertion[0 : overlap_length1)]] + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlap_length1] + diffs[pointer+1].Text = insertion[overlap_length1:] + pointer++ + } + } else { + if float64(overlap_length2) >= float64(len(deletion))/2 || + float64(overlap_length2) >= float64(len(insertion))/2 { + // Reverse overlap found. + // Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, insertion[overlap_length2:]} + diffs = append( + diffs[:pointer], + append([]Diff{overlap}, diffs[pointer:]...)...) + // diffs.splice(pointer, 0, + // [DiffEqual, deletion[0 : overlap_length2)]] + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlap_length2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlap_length2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Diff_cleanupSemanticLossless looks for single edits surrounded on both sides by equalities +// which can be shifted sideways to align the edit to a word boundary. +// e.g: The cat came. -> The cat came. 
+func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + + /** + * Given two strings, compute a score representing whether the internal + * boundary falls on logical boundaries. + * Scores range from 6 (best) to 0 (worst). + * Closure, but does not reference any external variables. + * @param {string} one First string. + * @param {string} two Second string. + * @return {number} The score. + * @private + */ + diffCleanupSemanticScore_ := func(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to + // subtle differences in each language's definition of things like + // 'whitespace'. Since this function's purpose is largely cosmetic, + // the choice has been made to use each language's native features + // rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex_.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex_.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex_.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex_.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex_.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex_.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex_.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex_.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. 
+ return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 + } + + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore_(equality1, edit) + + diffCleanupSemanticScore_(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore_(equality1, edit) + + diffCleanupSemanticScore_(edit, equality2) + // The >= encourages trailing rather than leading whitespace on + // edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. 
+ if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + //splice(diffs, pointer+1, 1) + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// Diff_cleanupEfficiency reduces the number of edits by eliminating +// operationally trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + equalities := new(Stack) + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + pre_ins := false + // Is there a deletion operation before the last equality. + pre_del := false + // Is there an insertion operation after the last equality. + post_ins := false + // Is there a deletion operation after the last equality. + post_del := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (post_ins || post_del) { + // Candidate found. + equalities.Push(pointer) + pre_ins = post_ins + pre_del = post_del + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities.Clear() + lastequality = "" + } + post_ins = false + post_del = false + } else { // An insertion or deletion. 
+ if diffs[pointer].Type == DiffDelete { + post_del = true + } else { + post_ins = true + } + /* + * Five types to be split: + * ABXYCD + * AXCD + * ABXC + * AXCD + * ABXC + */ + var sum_pres int + if pre_ins { + sum_pres++ + } + if pre_del { + sum_pres++ + } + if post_ins { + sum_pres++ + } + if post_del { + sum_pres++ + } + if len(lastequality) > 0 && + ((pre_ins && pre_del && post_ins && post_del) || + ((len(lastequality) < dmp.DiffEditCost/2) && sum_pres == 3)) { + + // Duplicate record. + diffs = append(diffs[:equalities.Peek().(int)], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[equalities.Peek().(int):]...)...) + + // Change second copy to insert. + diffs[equalities.Peek().(int)+1].Type = DiffInsert + equalities.Pop() // Throw away the equality we just deleted. + lastequality = "" + + if pre_ins && pre_del { + // No changes made which could affect previous entry, keep going. + post_ins = true + post_del = true + equalities.Clear() + } else { + if equalities.Len() > 0 { + equalities.Pop() + pointer = equalities.Peek().(int) + } else { + pointer = -1 + } + post_ins = false + post_del = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// Diff_cleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. 
+ diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + count_delete := 0 + count_insert := 0 + commonlength := 0 + text_delete := "" + text_insert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + count_insert += 1 + text_insert += diffs[pointer].Text + pointer += 1 + break + case DiffDelete: + count_delete += 1 + text_delete += diffs[pointer].Text + pointer += 1 + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if count_delete+count_insert > 1 { + if count_delete != 0 && count_insert != 0 { + // Factor out any common prefixies. + commonlength = dmp.DiffCommonPrefix(text_insert, text_delete) + if commonlength != 0 { + x := pointer - count_delete - count_insert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += text_insert[:commonlength] + } else { + diffs = append([]Diff{Diff{DiffEqual, text_insert[:commonlength]}}, diffs...) + pointer += 1 + } + text_insert = text_insert[commonlength:] + text_delete = text_delete[commonlength:] + } + // Factor out any common suffixies. + commonlength = dmp.DiffCommonSuffix(text_insert, text_delete) + if commonlength != 0 { + insert_index := len(text_insert) - commonlength + delete_index := len(text_delete) - commonlength + diffs[pointer].Text = text_insert[insert_index:] + diffs[pointer].Text + text_insert = text_insert[:insert_index] + text_delete = text_delete[:delete_index] + } + } + // Delete the offending records and add the merged ones. 
+ if count_delete == 0 { + diffs = splice(diffs, pointer-count_insert, + count_delete+count_insert, + Diff{DiffInsert, text_insert}) + } else if count_insert == 0 { + diffs = splice(diffs, pointer-count_delete, + count_delete+count_insert, + Diff{DiffDelete, text_delete}) + } else { + diffs = splice(diffs, pointer-count_delete-count_insert, + count_delete+count_insert, + Diff{DiffDelete, text_delete}, + Diff{DiffInsert, text_insert}) + } + + pointer = pointer - count_delete - count_insert + 1 + if count_delete != 0 { + pointer += 1 + } + if count_insert != 0 { + pointer += 1 + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + count_insert = 0 + count_delete = 0 + text_delete = "" + text_insert = "" + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by + // equalities which can be shifted sideways to eliminate an equality. + // e.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. 
+ diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// Diff_xIndex. loc is a location in text1, comAdde and return the equivalent location in +// text2. +// e.g. "The cat" vs "The big cat", 1->1, 5->8 +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + last_chars1 := 0 + last_chars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. + chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + last_chars1 = chars1 + last_chars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return last_chars2 + } + // Add the remaining character length. + return last_chars2 + (loc - last_chars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own +// display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶
", -1) + switch diff.Type { + case DiffInsert: + buff.WriteString("") + buff.WriteString(text) + buff.WriteString("") + case DiffDelete: + buff.WriteString("") + buff.WriteString(text) + buff.WriteString("") + case DiffEqual: + buff.WriteString("") + buff.WriteString(text) + buff.WriteString("") + } + } + return buff.String() +} + +// Diff_text1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// Diff_text2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// Diff_levenshtein computes the Levenshtein distance; the number of inserted, deleted or +// substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += len(aDiff.Text) + case DiffDelete: + deletions += len(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// Diff_toDelta crushes the diff into an encoded string which describes the operations +// required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. +// Operations are tab-separated. Inserted text is escaped using %xx +// notation. 
+func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + text.WriteString("+") + text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + text.WriteString("\t") + break + case DiffDelete: + text.WriteString("-") + text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + text.WriteString("\t") + break + case DiffEqual: + text.WriteString("=") + text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. + delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// Diff_fromDelta. Given the original text1, and an encoded string which describes the +// operations required to transform text1 into text2, comAdde the full diff. +func (dmp *DiffMatchPatch) DiffFromDelta(text1, delta string) (diffs []Diff, err error) { + diffs = []Diff{} + + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + }() + + pointer := 0 // Cursor in text1 + tokens := strings.Split(delta, "\t") + + for _, token := range tokens { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the + // operation of this token (delete, insert, equality). 
+ param := token[1:] + + switch op := token[0]; op { + case '+': + // decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return diffs, err + } else if n < 0 { + return diffs, errors.New("Negative number in DiffFromDelta: " + param) + } + + // remember that string slicing is by byte - we want by rune here. + text := string([]rune(text1)[pointer : pointer+int(n)]) + pointer += int(n) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return diffs, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if pointer != len([]rune(text1)) { + return diffs, fmt.Errorf("Delta length (%v) smaller than source text length (%v)", pointer, len(text1)) + } + return diffs, err +} + +// MATCH FUNCTIONS + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. 
+ return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the +// Bitap algorithm. Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + var score_threshold float64 = dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + best_loc := strings.Index(text, pattern) + if best_loc != -1 { + score_threshold = math.Min(dmp.matchBitapScore(0, best_loc, loc, + pattern), score_threshold) + // What about in the other direction? (speedup) + best_loc = strings.LastIndex(text, pattern) + if best_loc != -1 { + score_threshold = math.Min(dmp.matchBitapScore(0, best_loc, loc, + pattern), score_threshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + best_loc = -1 + + var bin_min, bin_mid int + bin_max := len(pattern) + len(text) + last_rd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. + // Run a binary search to determine how far from 'loc' we can stray at + // this error level. + bin_min = 0 + bin_mid = bin_max + for bin_min < bin_mid { + if dmp.matchBitapScore(d, loc+bin_mid, loc, pattern) <= score_threshold { + bin_min = bin_mid + } else { + bin_max = bin_mid + } + bin_mid = (bin_max-bin_min)/2 + bin_min + } + // Use the result from this iteration as the maximum for the next. + bin_max = bin_mid + start := int(math.Max(1, float64(loc-bin_mid+1))) + finish := int(math.Min(float64(loc+bin_mid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. 
+ charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((last_rd[j+1] | last_rd[j]) << 1) | 1) | last_rd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing + // match. But check anyway. + if score <= score_threshold { + // Told you so. + score_threshold = score + best_loc = j - 1 + if best_loc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-best_loc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > score_threshold { + // No hope for a (better) match at greater error levels. + break + } + last_rd = rd + } + return best_loc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. +func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + var accuracy float64 = float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. + if proximity == 0 { + return accuracy + } else { + return 1.0 + } + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. 
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + char_pattern := []byte(pattern) + for _, c := range char_pattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range char_pattern { + value := s[c] | int(uint(1)< 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// Compute a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + char_count1 := 0 // Number of characters into the text1 string. + char_count2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatch_text) and apply the diffs until we arrive at + // text2 (postpatch_text). We recreate the patches one by one to determine + // context info. + prepatch_text := text1 + postpatch_text := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
+ patch.start1 = char_count1 + patch.start2 = char_count2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.length2 += len(aDiff.Text) + postpatch_text = postpatch_text[:char_count2] + + aDiff.Text + postpatch_text[char_count2:] + case DiffDelete: + patch.length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatch_text = postpatch_text[:char_count2] + postpatch_text[char_count2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.length1 += len(aDiff.Text) + patch.length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatch_text) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. + // http://code.google.com/p/google-diff-match-patch/wiki/Unidiff + // Update prepatch text & pos to reflect the application of the + // just completed patch. + prepatch_text = postpatch_text + char_count1 = char_count2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + char_count1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + char_count2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatch_text) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a +// given an array of patches. 
+func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.start1 = aPatch.start1 + patchCopy.start2 = aPatch.start2 + patchCopy.length1 = aPatch.length1 + patchCopy.length2 = aPatch.length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well +// as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual + // location of the previous patch. If there are patches expected at + // positions 10 and 20, but the first patch was found at 12, delta is 2 + // and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expected_loc := aPatch.start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var start_loc int + end_loc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern + // in the case of a monster delete. + start_loc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expected_loc) + if start_loc != -1 { + end_loc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expected_loc+len(text1)-dmp.MatchMaxBits) + if end_loc == -1 || start_loc >= end_loc { + // Can't find valid trailing context. Drop this patch. 
+ start_loc = -1 + } + } + } else { + start_loc = dmp.MatchMain(text, text1, expected_loc) + } + if start_loc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.length2 - aPatch.length1 + } else { + // Found a match. :) + results[x] = true + delta = start_loc - expected_loc + var text2 string + if end_loc == -1 { + text2 = text[start_loc:int(math.Min(float64(start_loc+len(text1)), float64(len(text))))] + } else { + text2 = text[start_loc:int(math.Min(float64(end_loc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:start_loc] + dmp.DiffText2(aPatch.diffs) + text[start_loc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent + // indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:start_loc+index2] + aDiff.Text + text[start_loc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + start_index := start_loc + index2 + text = text[:start_index] + + text[start_index+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patch_apply. 
+func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i, _ := range patches { + patches[i].start1 += paddingLength + patches[i].start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].start1 -= paddingLength // Should be 0. + patches[0].start2 -= paddingLength // Should be 0. + patches[0].length1 += paddingLength + patches[0].length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].start1 -= extraLength + patches[0].start2 -= extraLength + patches[0].length1 += extraLength + patches[0].length2 += extraLength + } + + // Add some padding on end of last diff. + last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].length1 += paddingLength + patches[last].length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. 
+ lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].length1 += extraLength + patches[last].length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the +// maximum limit of the match algorithm. +// Intended to be called only from within patch_apply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patch_size := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].length1 <= patch_size { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x -= 1 + + start1 := bigpatch.start1 + start2 := bigpatch.start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.start1 = start1 - len(precontext) + patch.start2 = start2 - len(precontext) + if len(precontext) != 0 { + patch.length1 = len(precontext) + patch.length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.length1 < patch_size-dmp.PatchMargin { + diff_type := bigpatch.diffs[0].Type + diff_text := bigpatch.diffs[0].Text + if diff_type == DiffInsert { + // Insertions are harmless. + patch.length2 += len(diff_text) + start2 += len(diff_text) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diff_type == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diff_text) > 2*patch_size { + // This is a large deletion. Let it pass in one chunk. 
+ patch.length1 += len(diff_text) + start1 += len(diff_text) + empty = false + patch.diffs = append(patch.diffs, Diff{diff_type, diff_text}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diff_text = diff_text[:min(len(diff_text), patch_size-patch.length1-dmp.PatchMargin)] + + patch.length1 += len(diff_text) + start1 += len(diff_text) + if diff_type == DiffEqual { + patch.length2 += len(diff_text) + start2 += len(diff_text) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diff_type, diff_text}) + if diff_text == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diff_text):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. + if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.length1 += len(postcontext) + patch.length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x += 1 + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. 
+func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch +// objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.start1-- + patch.length1 = 1 + } else if m[2] == "0" { + patch.length1 = 0 + } else { + patch.start1-- + patch.length1, _ = strconv.Atoi(m[2]) + } + + patch.start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.start2-- + patch.length2 = 1 + } else if m[4] == "0" { + patch.length2 = 0 + } else { + patch.start2-- + patch.length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? 
+ return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go deleted file mode 100644 index 17374e109..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "math" -) - -// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. -// Returns -1 if no match found. -func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { - // Check for null inputs not needed since null can't be passed in C#. - - loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) - if text == pattern { - // Shortcut (potentially not guaranteed by the algorithm) - return 0 - } else if len(text) == 0 { - // Nothing to match. - return -1 - } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { - // Perfect match at the perfect spot! (Includes case of null pattern) - return loc - } - // Do a fuzzy compare. - return dmp.MatchBitap(text, pattern, loc) -} - -// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. -// Returns -1 if no match was found. -func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { - // Initialise the alphabet. - s := dmp.MatchAlphabet(pattern) - - // Highest score beyond which we give up. - scoreThreshold := dmp.MatchThreshold - // Is there a nearby exact match? 
(speedup) - bestLoc := indexOf(text, pattern, loc) - if bestLoc != -1 { - scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, - pattern), scoreThreshold) - // What about in the other direction? (speedup) - bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) - if bestLoc != -1 { - scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, - pattern), scoreThreshold) - } - } - - // Initialise the bit arrays. - matchmask := 1 << uint((len(pattern) - 1)) - bestLoc = -1 - - var binMin, binMid int - binMax := len(pattern) + len(text) - lastRd := []int{} - for d := 0; d < len(pattern); d++ { - // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. - binMin = 0 - binMid = binMax - for binMin < binMid { - if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { - binMin = binMid - } else { - binMax = binMid - } - binMid = (binMax-binMin)/2 + binMin - } - // Use the result from this iteration as the maximum for the next. - binMax = binMid - start := int(math.Max(1, float64(loc-binMid+1))) - finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) - - rd := make([]int, finish+2) - rd[finish+1] = (1 << uint(d)) - 1 - - for j := finish; j >= start; j-- { - var charMatch int - if len(text) <= j-1 { - // Out of range. - charMatch = 0 - } else if _, ok := s[text[j-1]]; !ok { - charMatch = 0 - } else { - charMatch = s[text[j-1]] - } - - if d == 0 { - // First pass: exact match. - rd[j] = ((rd[j+1] << 1) | 1) & charMatch - } else { - // Subsequent passes: fuzzy match. - rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] - } - if (rd[j] & matchmask) != 0 { - score := dmp.matchBitapScore(d, j-1, loc, pattern) - // This match will almost certainly be better than any existing match. But check anyway. - if score <= scoreThreshold { - // Told you so. 
- scoreThreshold = score - bestLoc = j - 1 - if bestLoc > loc { - // When passing loc, don't exceed our current distance from loc. - start = int(math.Max(1, float64(2*loc-bestLoc))) - } else { - // Already passed loc, downhill from here on in. - break - } - } - } - } - if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { - // No hope for a (better) match at greater error levels. - break - } - lastRd = rd - } - return bestLoc -} - -// matchBitapScore computes and returns the score for a match with e errors and x location. -func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { - accuracy := float64(e) / float64(len(pattern)) - proximity := math.Abs(float64(loc - x)) - if dmp.MatchDistance == 0 { - // Dodge divide by zero error. - if proximity == 0 { - return accuracy - } - - return 1.0 - } - return accuracy + (proximity / float64(dmp.MatchDistance)) -} - -// MatchAlphabet initialises the alphabet for the Bitap algorithm. -func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { - s := map[byte]int{} - charPattern := []byte(pattern) - for _, c := range charPattern { - _, ok := s[c] - if !ok { - s[c] = 0 - } - } - i := 0 - - for _, c := range charPattern { - value := s[c] | int(uint(1)< y { - return x - } - return y -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go deleted file mode 100644 index 116c04348..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. -// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. 
-// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "bytes" - "errors" - "math" - "net/url" - "regexp" - "strconv" - "strings" -) - -// Patch represents one patch operation. -type Patch struct { - diffs []Diff - start1 int - start2 int - length1 int - length2 int -} - -// String emulates GNU diff's format. -// Header: @@ -382,8 +481,9 @@ -// Indicies are printed as 1-based, not 0-based. -func (p *Patch) String() string { - var coords1, coords2 string - - if p.length1 == 0 { - coords1 = strconv.Itoa(p.start1) + ",0" - } else if p.length1 == 1 { - coords1 = strconv.Itoa(p.start1 + 1) - } else { - coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1) - } - - if p.length2 == 0 { - coords2 = strconv.Itoa(p.start2) + ",0" - } else if p.length2 == 1 { - coords2 = strconv.Itoa(p.start2 + 1) - } else { - coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2) - } - - var text bytes.Buffer - _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") - - // Escape the body of the patch with %xx notation. - for _, aDiff := range p.diffs { - switch aDiff.Type { - case DiffInsert: - _, _ = text.WriteString("+") - case DiffDelete: - _, _ = text.WriteString("-") - case DiffEqual: - _, _ = text.WriteString(" ") - } - - _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) - _, _ = text.WriteString("\n") - } - - return unescaper.Replace(text.String()) -} - -// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. -func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { - if len(text) == 0 { - return patch - } - - pattern := text[patch.start2 : patch.start2+patch.length1] - padding := 0 - - // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. 
- for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && - len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { - padding += dmp.PatchMargin - maxStart := max(0, patch.start2-padding) - minEnd := min(len(text), patch.start2+patch.length1+padding) - pattern = text[maxStart:minEnd] - } - // Add one chunk for good luck. - padding += dmp.PatchMargin - - // Add the prefix. - prefix := text[max(0, patch.start2-padding):patch.start2] - if len(prefix) != 0 { - patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) - } - // Add the suffix. - suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)] - if len(suffix) != 0 { - patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) - } - - // Roll back the start points. - patch.start1 -= len(prefix) - patch.start2 -= len(prefix) - // Extend the lengths. - patch.length1 += len(prefix) + len(suffix) - patch.length2 += len(prefix) + len(suffix) - - return patch -} - -// PatchMake computes a list of patches. -func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { - if len(opt) == 1 { - diffs, _ := opt[0].([]Diff) - text1 := dmp.DiffText1(diffs) - return dmp.PatchMake(text1, diffs) - } else if len(opt) == 2 { - text1 := opt[0].(string) - switch t := opt[1].(type) { - case string: - diffs := dmp.DiffMain(text1, t, true) - if len(diffs) > 2 { - diffs = dmp.DiffCleanupSemantic(diffs) - diffs = dmp.DiffCleanupEfficiency(diffs) - } - return dmp.PatchMake(text1, diffs) - case []Diff: - return dmp.patchMake2(text1, t) - } - } else if len(opt) == 3 { - return dmp.PatchMake(opt[0], opt[2]) - } - return []Patch{} -} - -// patchMake2 computes a list of patches to turn text1 into text2. -// text2 is not provided, diffs are the delta between text1 and text2. -func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { - // Check for null inputs not needed since null can't be passed in C#. 
- patches := []Patch{} - if len(diffs) == 0 { - return patches // Get rid of the null case. - } - - patch := Patch{} - charCount1 := 0 // Number of characters into the text1 string. - charCount2 := 0 // Number of characters into the text2 string. - // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. - prepatchText := text1 - postpatchText := text1 - - for i, aDiff := range diffs { - if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { - // A new patch starts here. - patch.start1 = charCount1 - patch.start2 = charCount2 - } - - switch aDiff.Type { - case DiffInsert: - patch.diffs = append(patch.diffs, aDiff) - patch.length2 += len(aDiff.Text) - postpatchText = postpatchText[:charCount2] + - aDiff.Text + postpatchText[charCount2:] - case DiffDelete: - patch.length1 += len(aDiff.Text) - patch.diffs = append(patch.diffs, aDiff) - postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] - case DiffEqual: - if len(aDiff.Text) <= 2*dmp.PatchMargin && - len(patch.diffs) != 0 && i != len(diffs)-1 { - // Small equality inside a patch. - patch.diffs = append(patch.diffs, aDiff) - patch.length1 += len(aDiff.Text) - patch.length2 += len(aDiff.Text) - } - if len(aDiff.Text) >= 2*dmp.PatchMargin { - // Time for a new patch. - if len(patch.diffs) != 0 { - patch = dmp.PatchAddContext(patch, prepatchText) - patches = append(patches, patch) - patch = Patch{} - // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. - prepatchText = postpatchText - charCount1 = charCount2 - } - } - } - - // Update the current character count. 
- if aDiff.Type != DiffInsert { - charCount1 += len(aDiff.Text) - } - if aDiff.Type != DiffDelete { - charCount2 += len(aDiff.Text) - } - } - - // Pick up the leftover patch if not empty. - if len(patch.diffs) != 0 { - patch = dmp.PatchAddContext(patch, prepatchText) - patches = append(patches, patch) - } - - return patches -} - -// PatchDeepCopy returns an array that is identical to a given an array of patches. -func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { - patchesCopy := []Patch{} - for _, aPatch := range patches { - patchCopy := Patch{} - for _, aDiff := range aPatch.diffs { - patchCopy.diffs = append(patchCopy.diffs, Diff{ - aDiff.Type, - aDiff.Text, - }) - } - patchCopy.start1 = aPatch.start1 - patchCopy.start2 = aPatch.start2 - patchCopy.length1 = aPatch.length1 - patchCopy.length2 = aPatch.length2 - patchesCopy = append(patchesCopy, patchCopy) - } - return patchesCopy -} - -// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. -func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { - if len(patches) == 0 { - return text, []bool{} - } - - // Deep copy the patches so that no changes are made to originals. - patches = dmp.PatchDeepCopy(patches) - - nullPadding := dmp.PatchAddPadding(patches) - text = nullPadding + text + nullPadding - patches = dmp.PatchSplitMax(patches) - - x := 0 - // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. 
- delta := 0 - results := make([]bool, len(patches)) - for _, aPatch := range patches { - expectedLoc := aPatch.start2 + delta - text1 := dmp.DiffText1(aPatch.diffs) - var startLoc int - endLoc := -1 - if len(text1) > dmp.MatchMaxBits { - // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. - startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) - if startLoc != -1 { - endLoc = dmp.MatchMain(text, - text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) - if endLoc == -1 || startLoc >= endLoc { - // Can't find valid trailing context. Drop this patch. - startLoc = -1 - } - } - } else { - startLoc = dmp.MatchMain(text, text1, expectedLoc) - } - if startLoc == -1 { - // No match found. :( - results[x] = false - // Subtract the delta for this failed patch from subsequent patches. - delta -= aPatch.length2 - aPatch.length1 - } else { - // Found a match. :) - results[x] = true - delta = startLoc - expectedLoc - var text2 string - if endLoc == -1 { - text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] - } else { - text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] - } - if text1 == text2 { - // Perfect match, just shove the Replacement text in. - text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] - } else { - // Imperfect match. Run a diff to get a framework of equivalent indices. - diffs := dmp.DiffMain(text1, text2, false) - if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { - // The end points match, but the content is unacceptably bad. 
- results[x] = false - } else { - diffs = dmp.DiffCleanupSemanticLossless(diffs) - index1 := 0 - for _, aDiff := range aPatch.diffs { - if aDiff.Type != DiffEqual { - index2 := dmp.DiffXIndex(diffs, index1) - if aDiff.Type == DiffInsert { - // Insertion - text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] - } else if aDiff.Type == DiffDelete { - // Deletion - startIndex := startLoc + index2 - text = text[:startIndex] + - text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] - } - } - if aDiff.Type != DiffDelete { - index1 += len(aDiff.Text) - } - } - } - } - } - x++ - } - // Strip the padding off. - text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] - return text, results -} - -// PatchAddPadding adds some padding on text start and end so that edges can match something. -// Intended to be called only from within patchApply. -func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { - paddingLength := dmp.PatchMargin - nullPadding := "" - for x := 1; x <= paddingLength; x++ { - nullPadding += string(x) - } - - // Bump all the patches forward. - for i := range patches { - patches[i].start1 += paddingLength - patches[i].start2 += paddingLength - } - - // Add some padding on start of first diff. - if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { - // Add nullPadding equality. - patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) - patches[0].start1 -= paddingLength // Should be 0. - patches[0].start2 -= paddingLength // Should be 0. - patches[0].length1 += paddingLength - patches[0].length2 += paddingLength - } else if paddingLength > len(patches[0].diffs[0].Text) { - // Grow first equality. 
- extraLength := paddingLength - len(patches[0].diffs[0].Text) - patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text - patches[0].start1 -= extraLength - patches[0].start2 -= extraLength - patches[0].length1 += extraLength - patches[0].length2 += extraLength - } - - // Add some padding on end of last diff. - last := len(patches) - 1 - if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { - // Add nullPadding equality. - patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) - patches[last].length1 += paddingLength - patches[last].length2 += paddingLength - } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { - // Grow last equality. - lastDiff := patches[last].diffs[len(patches[last].diffs)-1] - extraLength := paddingLength - len(lastDiff.Text) - patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] - patches[last].length1 += extraLength - patches[last].length2 += extraLength - } - - return nullPadding -} - -// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. -// Intended to be called only from within patchApply. -func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { - patchSize := dmp.MatchMaxBits - for x := 0; x < len(patches); x++ { - if patches[x].length1 <= patchSize { - continue - } - bigpatch := patches[x] - // Remove the big old patch. - patches = append(patches[:x], patches[x+1:]...) - x-- - - start1 := bigpatch.start1 - start2 := bigpatch.start2 - precontext := "" - for len(bigpatch.diffs) != 0 { - // Create one of several smaller patches. 
- patch := Patch{} - empty := true - patch.start1 = start1 - len(precontext) - patch.start2 = start2 - len(precontext) - if len(precontext) != 0 { - patch.length1 = len(precontext) - patch.length2 = len(precontext) - patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) - } - for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin { - diffType := bigpatch.diffs[0].Type - diffText := bigpatch.diffs[0].Text - if diffType == DiffInsert { - // Insertions are harmless. - patch.length2 += len(diffText) - start2 += len(diffText) - patch.diffs = append(patch.diffs, bigpatch.diffs[0]) - bigpatch.diffs = bigpatch.diffs[1:] - empty = false - } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { - // This is a large deletion. Let it pass in one chunk. - patch.length1 += len(diffText) - start1 += len(diffText) - empty = false - patch.diffs = append(patch.diffs, Diff{diffType, diffText}) - bigpatch.diffs = bigpatch.diffs[1:] - } else { - // Deletion or equality. Only take as much as we can stomach. - diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)] - - patch.length1 += len(diffText) - start1 += len(diffText) - if diffType == DiffEqual { - patch.length2 += len(diffText) - start2 += len(diffText) - } else { - empty = false - } - patch.diffs = append(patch.diffs, Diff{diffType, diffText}) - if diffText == bigpatch.diffs[0].Text { - bigpatch.diffs = bigpatch.diffs[1:] - } else { - bigpatch.diffs[0].Text = - bigpatch.diffs[0].Text[len(diffText):] - } - } - } - // Compute the head context for the next patch. - precontext = dmp.DiffText2(patch.diffs) - precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] - - postcontext := "" - // Append the end context for this patch. 
- if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { - postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] - } else { - postcontext = dmp.DiffText1(bigpatch.diffs) - } - - if len(postcontext) != 0 { - patch.length1 += len(postcontext) - patch.length2 += len(postcontext) - if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { - patch.diffs[len(patch.diffs)-1].Text += postcontext - } else { - patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) - } - } - if !empty { - x++ - patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) - } - } - } - return patches -} - -// PatchToText takes a list of patches and returns a textual representation. -func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { - var text bytes.Buffer - for _, aPatch := range patches { - _, _ = text.WriteString(aPatch.String()) - } - return text.String() -} - -// PatchFromText parses a textual representation of patches and returns a List of Patch objects. 
-func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { - patches := []Patch{} - if len(textline) == 0 { - return patches, nil - } - text := strings.Split(textline, "\n") - textPointer := 0 - patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") - - var patch Patch - var sign uint8 - var line string - for textPointer < len(text) { - - if !patchHeader.MatchString(text[textPointer]) { - return patches, errors.New("Invalid patch string: " + text[textPointer]) - } - - patch = Patch{} - m := patchHeader.FindStringSubmatch(text[textPointer]) - - patch.start1, _ = strconv.Atoi(m[1]) - if len(m[2]) == 0 { - patch.start1-- - patch.length1 = 1 - } else if m[2] == "0" { - patch.length1 = 0 - } else { - patch.start1-- - patch.length1, _ = strconv.Atoi(m[2]) - } - - patch.start2, _ = strconv.Atoi(m[3]) - - if len(m[4]) == 0 { - patch.start2-- - patch.length2 = 1 - } else if m[4] == "0" { - patch.length2 = 0 - } else { - patch.start2-- - patch.length2, _ = strconv.Atoi(m[4]) - } - textPointer++ - - for textPointer < len(text) { - if len(text[textPointer]) > 0 { - sign = text[textPointer][0] - } else { - textPointer++ - continue - } - - line = text[textPointer][1:] - line = strings.Replace(line, "+", "%2b", -1) - line, _ = url.QueryUnescape(line) - if sign == '-' { - // Deletion. - patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) - } else if sign == '+' { - // Insertion. - patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) - } else if sign == ' ' { - // Minor equality. - patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) - } else if sign == '@' { - // Start of next patch. - break - } else { - // WTF? 
- return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) - } - textPointer++ - } - - patches = append(patches, patch) - } - return patches, nil -} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stack.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stack.go new file mode 100644 index 000000000..d28ae4597 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stack.go @@ -0,0 +1,66 @@ +package diffmatchpatch + +import ( + "fmt" +) + +type Stack struct { + top *Element + size int +} + +type Element struct { + value interface{} + next *Element +} + +// Len returns the stack's length +func (s *Stack) Len() int { + return s.size +} + +// Push appends a new element onto the stack +func (s *Stack) Push(value interface{}) { + s.top = &Element{value, s.top} + s.size++ +} + +// Pop removes the top element from the stack and return its value +// If the stack is empty, return nil +func (s *Stack) Pop() (value interface{}) { + if s.size > 0 { + value, s.top = s.top.value, s.top.next + s.size-- + return + } + return nil +} + +// Peek returns the value of the element on the top of the stack +// but don't remove it. If the stack is empty, return nil +func (s *Stack) Peek() (value interface{}) { + if s.size > 0 { + value = s.top.value + return + } + return -1 +} + +// Clear empties the stack +func (s *Stack) Clear() { + s.top = nil + s.size = 0 +} + +func main() { + stack := new(Stack) + + stack.Push("Things") + stack.Push("and") + stack.Push("Stuff") + + for stack.Len() > 0 { + fmt.Printf("%s ", stack.Pop().(string)) + } + fmt.Println() +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go deleted file mode 100644 index 265f29cc7..000000000 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. 
-// https://github.com/sergi/go-diff -// See the included LICENSE file for license details. -// -// go-diff is a Go implementation of Google's Diff, Match, and Patch library -// Original library is Copyright (c) 2006 Google Inc. -// http://code.google.com/p/google-diff-match-patch/ - -package diffmatchpatch - -import ( - "strings" - "unicode/utf8" -) - -// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. -// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. -var unescaper = strings.NewReplacer( - "%21", "!", "%7E", "~", "%27", "'", - "%28", "(", "%29", ")", "%3B", ";", - "%2F", "/", "%3F", "?", "%3A", ":", - "%40", "@", "%26", "&", "%3D", "=", - "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") - -// indexOf returns the first index of pattern in str, starting at str[i]. -func indexOf(str string, pattern string, i int) int { - if i > len(str)-1 { - return -1 - } - if i <= 0 { - return strings.Index(str, pattern) - } - ind := strings.Index(str[i:], pattern) - if ind == -1 { - return -1 - } - return ind + i -} - -// lastIndexOf returns the last index of pattern in str, starting at str[i]. -func lastIndexOf(str string, pattern string, i int) int { - if i < 0 { - return -1 - } - if i >= len(str) { - return strings.LastIndex(str, pattern) - } - _, size := utf8.DecodeRuneInString(str[i:]) - return strings.LastIndex(str[:i+size], pattern) -} - -// runesIndexOf returns the index of pattern in target, starting at target[i]. 
-func runesIndexOf(target, pattern []rune, i int) int { - if i > len(target)-1 { - return -1 - } - if i <= 0 { - return runesIndex(target, pattern) - } - ind := runesIndex(target[i:], pattern) - if ind == -1 { - return -1 - } - return ind + i -} - -func runesEqual(r1, r2 []rune) bool { - if len(r1) != len(r2) { - return false - } - for i, c := range r1 { - if c != r2[i] { - return false - } - } - return true -} - -// runesIndex is the equivalent of strings.Index for rune slices. -func runesIndex(r1, r2 []rune) int { - last := len(r1) - len(r2) - for i := 0; i <= last; i++ { - if runesEqual(r1[i:i+len(r2)], r2) { - return i - } - } - return -1 -} diff --git a/vendor/github.com/yudai/gojsondiff/LICENSE b/vendor/github.com/yudai/gojsondiff/LICENSE index 445f43cdf..b778ed8c3 100644 --- a/vendor/github.com/yudai/gojsondiff/LICENSE +++ b/vendor/github.com/yudai/gojsondiff/LICENSE @@ -72,7 +72,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -## cli.go - https://github.com/urfave/cli +## cli.go - https://github.com/codegangsta/cli Copyright (C) 2013 Jeremy Saenz All Rights Reserved. 
diff --git a/vendor/github.com/yudai/gojsondiff/deltas.go b/vendor/github.com/yudai/gojsondiff/deltas.go index a15a137e3..403c5bf4c 100644 --- a/vendor/github.com/yudai/gojsondiff/deltas.go +++ b/vendor/github.com/yudai/gojsondiff/deltas.go @@ -451,7 +451,7 @@ func (d *Moved) PostApply(object interface{}) interface{} { } func (d *Moved) similarity() (similarity float64) { - similarity = 0.6 // as type and contents are same + similarity = 0.6 // as type and contens are same ratio := float64(d.PrePosition().(Index)) / float64(d.PostPosition().(Index)) if ratio > 1 { ratio = 1 / ratio diff --git a/vendor/github.com/yudai/gojsondiff/formatter/ascii.go b/vendor/github.com/yudai/gojsondiff/formatter/ascii.go index d86de1634..b30781327 100644 --- a/vendor/github.com/yudai/gojsondiff/formatter/ascii.go +++ b/vendor/github.com/yudai/gojsondiff/formatter/ascii.go @@ -201,16 +201,16 @@ func (f *AsciiFormatter) processItem(value interface{}, deltas []diff.Delta, pos return nil } -func (f *AsciiFormatter) searchDeltas(deltas []diff.Delta, position diff.Position) (results []diff.Delta) { +func (f *AsciiFormatter) searchDeltas(deltas []diff.Delta, postion diff.Position) (results []diff.Delta) { results = make([]diff.Delta, 0) for _, delta := range deltas { switch delta.(type) { case diff.PostDelta: - if delta.(diff.PostDelta).PostPosition() == position { + if delta.(diff.PostDelta).PostPosition() == postion { results = append(results, delta) } case diff.PreDelta: - if delta.(diff.PreDelta).PrePosition() == position { + if delta.(diff.PreDelta).PrePosition() == postion { results = append(results, delta) } default: @@ -362,7 +362,7 @@ func (f *AsciiFormatter) printRecursive(name string, value interface{}, marker s func sortedKeys(m map[string]interface{}) (keys []string) { keys = make([]string, 0, len(m)) - for key := range m { + for key, _ := range m { keys = append(keys, key) } sort.Strings(keys) diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go index 
d7982c323..ce26c3b31 100644 --- a/vendor/gopkg.in/ini.v1/file.go +++ b/vendor/gopkg.in/ini.v1/file.go @@ -140,14 +140,9 @@ func (f *File) Section(name string) *Section { // Section returns list of Section. func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name] + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) } return sections } @@ -228,7 +223,7 @@ func (f *File) Append(source interface{}, others ...interface{}) error { func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { equalSign := "=" - if PrettyFormat || PrettyEqual { + if PrettyFormat { equalSign = " = " } @@ -305,10 +300,6 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { } else { key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:]) } - - // Support multiline comments - key.Comment = strings.Replace(key.Comment, "\n", "\n; ", -1) - if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { return nil, err } diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go index 15ebc8f72..535d3588a 100644 --- a/vendor/gopkg.in/ini.v1/ini.go +++ b/vendor/gopkg.in/ini.v1/ini.go @@ -32,7 +32,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.37.0" + _VERSION = "1.32.0" ) // Version returns current package version literal. @@ -53,9 +53,6 @@ var ( // or reduce all possible spaces for compact format. PrettyFormat = true - // Place spaces around "=" sign even when PrettyFormat is false - PrettyEqual = false - // Explicitly write DEFAULT section header DefaultHeader = false @@ -140,16 +137,6 @@ type LoadOptions struct { // AllowNestedValues indicates whether to allow AWS-like nested values. 
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format // when value is surrounded by double quotes, e.g. 
key="a \"value\"" => key=a "value" UnescapeValueDoubleQuotes bool diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go index d5aa2db60..db3af8f00 100644 --- a/vendor/gopkg.in/ini.v1/parser.go +++ b/vendor/gopkg.in/ini.v1/parser.go @@ -19,14 +19,11 @@ import ( "bytes" "fmt" "io" - "regexp" "strconv" "strings" "unicode" ) -var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") - type tokenType int const ( @@ -197,8 +194,7 @@ func hasSurroundedQuote(in string, quote byte) bool { } func (p *parser) readValue(in []byte, - parserBufferSize int, - ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines, spaceBeforeInlineComment bool) (string, error) { + ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols bool) (string, error) { line := strings.TrimLeftFunc(string(in), unicode.IsSpace) if len(line) == 0 { @@ -228,34 +224,21 @@ func (p *parser) readValue(in []byte, return line[startIdx : pos+startIdx], nil } - lastChar := line[len(line)-1] // Won't be able to reach here if value only contains whitespace line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] // Check continuation lines when desired - if !ignoreContinuation && trimmedLastChar == '\\' { + if !ignoreContinuation && line[len(line)-1] == '\\' { return p.readContinuationLines(line[:len(line)-1]) } // Check if ignore inline comment if !ignoreInlineComment { - var i int - if spaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - + i := strings.IndexAny(line, "#;") if i > -1 { p.comment.WriteString(line[i:]) line = strings.TrimSpace(line[:i]) } - } // Trim single and double quotes @@ -269,50 +252,7 @@ func (p *parser) readValue(in []byte, if strings.Contains(line, `\#`) { line = strings.Replace(line, `\#`, "#", -1) } - } else if allowPythonMultilines && 
lastChar == '\n' { - parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - identSize := -1 - val := line - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil { - if peekErr == io.EOF { - return val, nil - } - return "", peekErr - } - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - if len(peekMatches) != 3 { - return val, nil - } - - currentIdentSize := len(peekMatches[1]) - // NOTE: Return if not a python-ini multi-line value. - if currentIdentSize < 0 { - return val, nil - } - identSize = currentIdentSize - - // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.readUntil('\n') - if err != nil { - return "", err - } - - val += fmt.Sprintf("\n%s", peekMatches[2]) - } - - // NOTE: If it was a Python multi-line value, - // return the appended value. - if identSize > 0 { - return val, nil - } } - return line, nil } @@ -336,29 +276,6 @@ func (f *File) parse(reader io.Reader) (err error) { var line []byte var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO: When Golang 1.10 is the lowest version supported, - // replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 1kb at a time. - currentPeekSize := 1024 - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - for !p.isEOF { line, err = p.readUntil('\n') if err != nil { @@ -435,13 +352,10 @@ func (f *File) parse(reader io.Reader) (err error) { // Treat as boolean key when desired, and whole line is key name. 
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { kname, err := p.readValue(line, - parserBufferSize, f.options.IgnoreContinuation, f.options.IgnoreInlineComment, f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) + f.options.UnescapeValueCommentSymbols) if err != nil { return err } @@ -465,13 +379,10 @@ func (f *File) parse(reader io.Reader) (err error) { } value, err := p.readValue(line[offset:], - parserBufferSize, f.options.IgnoreContinuation, f.options.IgnoreInlineComment, f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols, - f.options.AllowPythonMultilineValues, - f.options.SpaceBeforeInlineComment) + f.options.UnescapeValueCommentSymbols) if err != nil { return err }