diff --git a/cmd/loki/main.go b/cmd/loki/main.go index eab42aed36ae..a87b23f14ab8 100644 --- a/cmd/loki/main.go +++ b/cmd/loki/main.go @@ -74,19 +74,10 @@ func main() { // Start Loki t, err := loki.New(config) - if err != nil { - level.Error(util.Logger).Log("msg", "error initialising loki", "err", err) - os.Exit(1) - } + util.CheckFatal("initialising loki", err) level.Info(util.Logger).Log("msg", "Starting Loki", "version", version.Info()) - if err := t.Run(); err != nil { - level.Error(util.Logger).Log("msg", "error running loki", "err", err) - } - - if err := t.Stop(); err != nil { - level.Error(util.Logger).Log("msg", "error stopping loki", "err", err) - os.Exit(1) - } + err = t.Run() + util.CheckFatal("running loki", err) } diff --git a/go.sum b/go.sum index 419cf1e48c19..e19f081107ad 100644 --- a/go.sum +++ b/go.sum @@ -62,6 +62,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5 h1:PPfYWScYacO3Q6JMCLkyh6Ea2Q/REDTMgmiTAeiV8Jg= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= @@ -76,8 +77,10 @@ github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -89,6 +92,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1C github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= 
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -102,12 +106,14 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -128,6 +134,7 @@ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQ github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v1.0.0 h1:2XeuDgvPv/6QDyzIuxb6n36ADVocyqTLlOSpYBGYtvM= github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -185,6 +192,7 @@ github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= @@ -231,6 +239,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c 
h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= @@ -255,40 +264,48 @@ github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpR github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4 h1:1TjOzrWkj+9BrjnM1yPAICbaoC0FyfD49oVkTBrSSa0= github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime 
v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2 h1:clPGfBnJohokno0e+d7hs6Yocrzjlgz6EsQSDncCRnE= github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -296,9 +313,11 @@ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= @@ -322,6 +341,7 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/status v1.0.3 h1:WkVBY59mw7qUNTr/bLwO7J2vesJ0rQ2C3tMXrTd3w5M= github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnbAaq2zgrPrs= github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -357,6 +377,7 @@ github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -388,6 +409,7 @@ github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo github.com/gophercloud/gophercloud v0.6.0 h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -478,6 +500,7 @@ github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOpr github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= @@ -501,8 +524,10 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdl github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -524,13 +549,18 @@ github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939 h1:yZJImkCmVI6d1uJ9KRRf/96YbFLDQ/hhs6Xt9Z3OBXI= github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= @@ -539,6 +569,7 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -563,7 +594,9 @@ github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/minio-go/v6 v6.0.49 h1:bU4kIa/qChTLC1jrWZ8F+8gOiw1MClubddAJVR4gW3w= github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= @@ -588,7 +621,9 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozillazg/go-cos v0.13.0 h1:RylOpEESdWMLb13bl0ADhko12uMN3JmHqqwFu4OYGBY= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= +github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -596,6 +631,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= @@ -655,6 +691,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= +github.com/prometheus/alertmanager v0.20.0 h1:PBMNY7oyIvYMBBIag35/C0hO7xn8+35p4V5rNAph5N8= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -709,6 +746,7 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -718,9 +756,11 @@ github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCL github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/sercand/kuberesolver v2.1.0+incompatible h1:iJ1oCzPQ/aacsbCWLfJW1hPKkHMvCEgNSA9kvWcb9MY= github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= @@ -737,14 +777,17 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -769,6 +812,7 @@ github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/T github.com/thanos-io/thanos v0.12.1-0.20200416112106-b391ca115ed8 h1:z7sOhoCEWnrQ2MIew3cJxsaxKT0AQu5pgXA8ZjdrYlk= github.com/thanos-io/thanos v0.12.1-0.20200416112106-b391ca115ed8/go.mod h1:+nN9AzmfaIH2e2KJGyRxX0BoUGrRSyZmp+U8ToRxlDc= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= @@ -812,6 +856,7 @@ go.etcd.io/etcd v0.0.0-20200401174654-e694b7bb0875 h1:61WXaq6CI2RsDa1qZEWkW4KruL go.etcd.io/etcd v0.0.0-20200401174654-e694b7bb0875/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.0 h1:aeOqSrhl9eDRAap/3T5pCfMBEBxZ0vuXBP+RMtp2KX8= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1078,6 +1123,7 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1092,6 +1138,7 @@ gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM= gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 5e30adc46bcb..fd2cd2204fd6 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -4,7 +4,6 @@ import ( "context" "flag" "net/http" - "os" "sync/atomic" "time" @@ -16,7 +15,6 @@ import ( "github.com/cortexproject/cortex/pkg/util/services" "github.com/pkg/errors" - "github.com/go-kit/kit/log/level" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -34,7 +32,6 @@ const ( metricName = "logs" ) -var readinessProbeSuccess = []byte("Ready") var ( ingesterAppends = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "loki", @@ -75,6 +72,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Distributor coordinates replicates and distribution of log streams. type Distributor struct { + services.Service + cfg Config clientCfg client.Config ingestersRing ring.ReadRing @@ -85,6 +84,9 @@ type Distributor struct { // the number of healthy instances. 
distributorsRing *ring.Lifecycler + subservices *services.Manager + subservicesWatcher *services.FailureWatcher + // Per-user rate limiter. ingestionRateLimiter *limiter.RateLimiter } @@ -107,6 +109,8 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr var ingestionRateStrategy limiter.RateLimiterStrategy var distributorsRing *ring.Lifecycler + var servs []services.Service + if overrides.IngestionRateStrategy() == validation.GlobalIngestionRateStrategy { var err error distributorsRing, err = ring.NewLifecycler(cfg.DistributorRing.ToLifecyclerConfig(), nil, "distributor", ring.DistributorRingKey, false) @@ -114,18 +118,7 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr return nil, err } - distributorsRing.AddListener(services.NewListener(nil, nil, nil, nil, func(_ services.State, failure error) { - // lifecycler used to do os.Exit(1) on its own failure, but now it just goes into Failed state. - // for now we just simulate old behaviour here. When Distributor itself becomes a service, it will enter Failed state as well. - level.Error(cortex_util.Logger).Log("msg", "lifecycler failed", "err", err) - os.Exit(1) - })) - - err = services.StartAndAwaitRunning(context.Background(), distributorsRing) - if err != nil { - return nil, err - } - + servs = append(servs, distributorsRing) ingestionRateStrategy = newGlobalIngestionRateStrategy(overrides, distributorsRing) } else { ingestionRateStrategy = newLocalIngestionRateStrategy(overrides) @@ -141,18 +134,33 @@ func New(cfg Config, clientCfg client.Config, ingestersRing ring.ReadRing, overr ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), } - if err := services.StartAndAwaitRunning(context.Background(), d.pool); err != nil { - return nil, errors.Wrap(err, "starting client pool") + servs = append(servs, d.pool) + d.subservices, err = services.NewManager(servs...) + if err != nil { + return nil, errors.Wrap(err, "services manager") } + d.subservicesWatcher = services.NewFailureWatcher() + d.subservicesWatcher.WatchManager(d.subservices) + d.Service = services.NewBasicService(d.starting, d.running, d.stopping) return &d, nil } -func (d *Distributor) Stop() { - if d.distributorsRing != nil { - _ = services.StopAndAwaitTerminated(context.Background(), d.distributorsRing) +func (d *Distributor) starting(ctx context.Context) error { + return services.StartManagerAndAwaitHealthy(ctx, d.subservices) +} + +func (d *Distributor) running(ctx context.Context) error { + select { + case <-ctx.Done(): + return nil + case err := <-d.subservicesWatcher.Chan(): + return errors.Wrap(err, "distributor subservice failed") } - _ = services.StopAndAwaitTerminated(context.Background(), d.pool) +} + +func (d *Distributor) stopping(_ error) error { + return services.StopManagerAndAwaitStopped(context.Background(), d.subservices) } // TODO taken from Cortex, see if we can refactor out an usable interface. @@ -172,21 +180,6 @@ type pushTracker struct { err chan error } -// ReadinessHandler is used to indicate to k8s when the distributor is ready. -// Returns 200 when the distributor is ready, 500 otherwise. 
-func (d *Distributor) ReadinessHandler(w http.ResponseWriter, r *http.Request) { - _, err := d.ingestersRing.GetAll() - if err != nil { - http.Error(w, "Not ready: "+err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) - if _, err := w.Write(readinessProbeSuccess); err != nil { - level.Error(cortex_util.Logger).Log("msg", "error writing success message", "error", err) - } -} - // Push a set of streams. func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*logproto.PushResponse, error) { userID, err := user.ExtractOrgID(ctx) diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 8a40bb4ad760..827bfa1d7917 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -15,6 +15,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/consul" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" "github.com/prometheus/client_golang/prometheus" @@ -80,6 +81,7 @@ func TestDistributor(t *testing.T) { limits.MaxLineSize = fe.ByteSize(tc.maxLineSize) d := prepare(t, limits, nil) + defer services.StopAndAwaitTerminated(context.Background(), d) //nolint:errcheck request := makeWriteRequest(tc.lines, 10) @@ -163,7 +165,7 @@ func TestDistributor_PushIngestionRateLimiter(t *testing.T) { distributors := make([]*Distributor, testData.distributors) for i := 0; i < testData.distributors; i++ { distributors[i] = prepare(t, limits, kvStore) - defer distributors[i].Stop() + defer services.StopAndAwaitTerminated(context.Background(), distributors[i]) //nolint:errcheck } // If the distributors ring is setup, wait until the first distributor @@ -226,6 +228,7 @@ func prepare(t *testing.T, limits *validation.Limits, kvStore kv.Client) *Distri d, err := New(distributorConfig, clientConfig, ingestersRing, overrides) require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), d)) return d } diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index 82e236e61c76..ff2ac662d8ce 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -13,6 +13,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/grafana/loki/pkg/chunkenc" "github.com/grafana/loki/pkg/ingester/client" @@ -43,6 +44,7 @@ func TestChunkFlushingIdle(t *testing.T) { cfg.RetainPeriod = 500 * time.Millisecond store, ing := newTestStore(t, cfg) + defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck testData := pushTestSamples(t, ing) // wait beyond idle time so samples flush @@ -53,7 +55,7 @@ func TestChunkFlushingIdle(t *testing.T) { func TestChunkFlushingShutdown(t *testing.T) { store, ing := newTestStore(t, defaultIngesterTestConfig(t)) testData := pushTestSamples(t, ing) - ing.Shutdown() + require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing)) store.checkData(t, testData) } @@ -90,7 +92,7 @@ func TestFlushingCollidingLabels(t *testing.T) { require.NoError(t, err) // force flush - ing.Shutdown() + require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing)) // verify that we get all the data back store.checkData(t, map[string][]*logproto.Stream{userID: req.Streams}) @@ -154,6 
+156,7 @@ func TestFlushMaxAge(t *testing.T) { }, }) + require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing)) } type testStore struct { @@ -172,6 +175,7 @@ func newTestStore(t require.TestingT, cfg Config) (*testStore, *Ingester) { ing, err := New(cfg, client.Config{}, store, limits) require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) return store, ing } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index cc15eadf2a3c..78bfe3c148f5 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -5,12 +5,9 @@ import ( "errors" "flag" "fmt" - "net/http" - "os" "sync" "time" - "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/user" @@ -35,8 +32,6 @@ import ( // attempted. var ErrReadOnly = errors.New("Ingester is shutting down") -var readinessProbeSuccess = []byte("Ready") - var flushQueueLength = promauto.NewGauge(prometheus.GaugeOpts{ Name: "cortex_ingester_flush_queue_length", Help: "The total number of series pending in the flush queue.", @@ -93,6 +88,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Ingester builds chunks for incoming log streams. type Ingester struct { + services.Service + cfg Config clientConfig client.Config @@ -101,12 +98,14 @@ type Ingester struct { instances map[string]*instance readonly bool - lifecycler *ring.Lifecycler - store ChunkStore + lifecycler *ring.Lifecycler + lifecyclerWatcher *services.FailureWatcher - done sync.WaitGroup - quit chan struct{} - quitting chan struct{} + store ChunkStore + + loopDone sync.WaitGroup + loopQuit chan struct{} + tailersQuit chan struct{} // One queue per flush thread. Fingerprint is used to // pick a queue. @@ -138,49 +137,94 @@ func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *valid clientConfig: clientConfig, instances: map[string]*instance{}, store: store, - quit: make(chan struct{}), + loopQuit: make(chan struct{}), flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), - quitting: make(chan struct{}), + tailersQuit: make(chan struct{}), factory: func() chunkenc.Chunk { return chunkenc.NewMemChunk(enc, cfg.BlockSize, cfg.TargetChunkSize) }, } - i.flushQueuesDone.Add(cfg.ConcurrentFlushes) - for j := 0; j < cfg.ConcurrentFlushes; j++ { - i.flushQueues[j] = util.NewPriorityQueue(flushQueueLength) - go i.flushLoop(j) - } - i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, true) if err != nil { return nil, err } - i.lifecycler.AddListener(services.NewListener(nil, nil, nil, nil, func(_ services.State, failure error) { - // lifecycler used to do os.Exit(1) on its own failure, but now it just goes into Failed state. - // for now we just simulate old behaviour here. When Ingester itself becomes a service, it will enter Failed state as well. - level.Error(util.Logger).Log("msg", "lifecycler failed", "err", err) - os.Exit(1) - })) - - err = services.StartAndAwaitRunning(context.Background(), i.lifecycler) - if err != nil { - return nil, err - } + i.lifecyclerWatcher = services.NewFailureWatcher() + i.lifecyclerWatcher.WatchService(i.lifecycler) // Now that the lifecycler has been created, we can create the limiter // which depends on it. 
i.limiter = NewLimiter(limits, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor) - i.done.Add(1) + i.Service = services.NewBasicService(i.starting, i.running, i.stopping) + return i, nil +} + +func (i *Ingester) starting(ctx context.Context) error { + i.flushQueuesDone.Add(i.cfg.ConcurrentFlushes) + for j := 0; j < i.cfg.ConcurrentFlushes; j++ { + i.flushQueues[j] = util.NewPriorityQueue(flushQueueLength) + go i.flushLoop(j) + } + + // pass new context to lifecycler, so that it doesn't stop automatically when Ingester's service context is done + err := i.lifecycler.StartAsync(context.Background()) + if err != nil { + return err + } + + err = i.lifecycler.AwaitRunning(ctx) + if err != nil { + return err + } + + // start our loop + i.loopDone.Add(1) go i.loop() + return nil +} - return i, nil +func (i *Ingester) running(ctx context.Context) error { + var serviceError error + select { + // wait until service is asked to stop + case <-ctx.Done(): + // stop + case err := <-i.lifecyclerWatcher.Chan(): + serviceError = fmt.Errorf("lifecycler failed: %w", err) + } + + // close tailers before stopping our loop + close(i.tailersQuit) + for _, instance := range i.getInstances() { + instance.closeTailers() + } + + close(i.loopQuit) + i.loopDone.Wait() + return serviceError +} + +// Called after running exits, when Ingester transitions to Stopping state. +// At this point, loop no longer runs, but flushers are still running. +func (i *Ingester) stopping(_ error) error { + i.stopIncomingRequests() + + err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler) + + // Normally, flushers are stopped via lifecycler (in transferOut), but if lifecycler fails, + // we better stop them. + for _, flushQueue := range i.flushQueues { + flushQueue.Close() + } + i.flushQueuesDone.Wait() + + return err } func (i *Ingester) loop() { - defer i.done.Done() + defer i.loopDone.Done() flushTicker := time.NewTicker(i.cfg.FlushCheckPeriod) defer flushTicker.Stop() @@ -190,33 +234,12 @@ func (i *Ingester) loop() { case <-flushTicker.C: i.sweepUsers(false) - case <-i.quit: + case <-i.loopQuit: return } } } -// Shutdown stops the ingester. -func (i *Ingester) Shutdown() { - close(i.quit) - i.done.Wait() - - i.stopIncomingRequests() - - err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler) - if err != nil { - level.Error(util.Logger).Log("msg", "lifecycler failed", "err", err) - } -} - -// Stopping helps cleaning up resources before actual shutdown -func (i *Ingester) Stopping() { - close(i.quitting) - for _, instance := range i.getInstances() { - instance.closeTailers() - } -} - // Push implements logproto.Pusher. func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logproto.PushResponse, error) { instanceID, err := user.ExtractOrgID(ctx) @@ -313,18 +336,13 @@ func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health } // ReadinessHandler is used to indicate to k8s when the ingesters are ready for -// the addition removal of another ingester. Returns 200 when the ingester is +// the addition removal of another ingester. Returns 204 when the ingester is // ready, 500 otherwise. 
-func (i *Ingester) ReadinessHandler(w http.ResponseWriter, r *http.Request) { - if err := i.lifecycler.CheckReady(r.Context()); err != nil { - http.Error(w, "Not ready: "+err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) - if _, err := w.Write(readinessProbeSuccess); err != nil { - level.Error(util.Logger).Log("msg", "error writing success message", "error", err) +func (i *Ingester) CheckReady(ctx context.Context) error { + if s := i.State(); s != services.Running && s != services.Stopping { + return fmt.Errorf("ingester not ready: %v", s) } + return i.lifecycler.CheckReady(ctx) } func (i *Ingester) getInstanceByID(id string) (*instance, bool) { @@ -349,7 +367,7 @@ func (i *Ingester) getInstances() []*instance { // Tail logs matching given query func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_TailServer) error { select { - case <-i.quitting: + case <-i.tailersQuit: return errors.New("Ingester is stopping") default: } diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 4bf9f5b0062b..e9e3dc1165ba 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -7,9 +7,9 @@ import ( "testing" "time" - "github.com/grafana/loki/pkg/iter" - "github.com/grafana/loki/pkg/logql" - + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/stretchr/testify/require" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" @@ -17,11 +17,10 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/metadata" - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util/flagext" - "github.com/grafana/loki/pkg/ingester/client" + "github.com/grafana/loki/pkg/iter" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/util/validation" ) @@ -36,7 +35,7 @@ func TestIngester(t *testing.T) { i, err := New(ingesterConfig, client.Config{}, store, limits) require.NoError(t, err) - defer i.Shutdown() + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck req := logproto.PushRequest{ Streams: []*logproto.Stream{ @@ -204,7 +203,7 @@ func TestIngesterStreamLimitExceeded(t *testing.T) { i, err := New(ingesterConfig, client.Config{}, store, overrides) require.NoError(t, err) - defer i.Shutdown() + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck req := logproto.PushRequest{ Streams: []*logproto.Stream{ diff --git a/pkg/ingester/transfer.go b/pkg/ingester/transfer.go index ebe7ffc7ff06..9cc91ec20d61 100644 --- a/pkg/ingester/transfer.go +++ b/pkg/ingester/transfer.go @@ -173,7 +173,7 @@ func (i *Ingester) stopIncomingRequests() { // TransferOut implements ring.Lifecycler. 
func (i *Ingester) TransferOut(ctx context.Context) error { if i.cfg.MaxTransferRetries <= 0 { - return fmt.Errorf("transfers disabled") + return ring.ErrTransferDisabled } backoff := util.NewBackoff(ctx, util.BackoffConfig{ diff --git a/pkg/ingester/transfer_test.go b/pkg/ingester/transfer_test.go index 887829bfeb23..3ecbd8911ca2 100644 --- a/pkg/ingester/transfer_test.go +++ b/pkg/ingester/transfer_test.go @@ -76,7 +76,8 @@ func TestTransferOut(t *testing.T) { // Create a new ingester and transfer data to it ing2 := f.getIngester(time.Second*60, t) - ing.Shutdown() + defer services.StopAndAwaitTerminated(context.Background(), ing2) //nolint:errcheck + require.NoError(t, services.StopAndAwaitTerminated(context.Background(), ing)) assert.Len(t, ing2.instances, 1) if assert.Contains(t, ing2.instances, "test") { diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index e7ce6b36063b..39a77ab9f30f 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -1,8 +1,11 @@ package loki import ( + "bytes" + "context" "flag" "fmt" + "net/http" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/querier/frontend" @@ -10,6 +13,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" + "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -96,6 +100,9 @@ func (c *Config) Validate(log log.Logger) error { type Loki struct { cfg Config + // set during initialization + serviceMap map[moduleName]services.Service + server *server.Server ring *ring.Ring overrides *validation.Overrides @@ -120,10 +127,14 @@ func New(cfg Config) (*Loki, error) { loki.setupAuthMiddleware() - if err := loki.init(cfg.Target); err != nil { + serviceMap, err := loki.initModuleServices(cfg.Target) + if err != nil { return nil, err } + loki.serviceMap = serviceMap + loki.server.HTTP.Handle("/services", http.HandlerFunc(loki.servicesHandler)) + return loki, nil } @@ -159,72 +170,146 @@ func (t *Loki) setupAuthMiddleware() { } } -func (t *Loki) init(m moduleName) error { +func (t *Loki) initModuleServices(target moduleName) (map[moduleName]services.Service, error) { + servicesMap := map[moduleName]services.Service{} + // initialize all of our dependencies first - for _, dep := range orderedDeps(m) { - if err := t.initModule(dep); err != nil { - return err + deps := orderedDeps(target) + deps = append(deps, target) // lastly, initialize the requested module + + for ix, n := range deps { + mod := modules[n] + + var serv services.Service + + if mod.service != nil { + s, err := mod.service(t) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("error initialising module: %s", n)) + } + serv = s + } else if mod.wrappedService != nil { + s, err := mod.wrappedService(t) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("error initialising module: %s", n)) + } + if s != nil { + // We pass servicesMap, which isn't yet finished. By the time service starts, + // it will be fully built, so there is no need for extra synchronization. 
+ serv = newModuleServiceWrapper(servicesMap, n, s, mod.deps, findInverseDependencies(n, deps[ix+1:])) + } } - } - // lastly, initialize the requested module - return t.initModule(m) -} -func (t *Loki) initModule(m moduleName) error { - level.Info(util.Logger).Log("msg", "initialising", "module", m) - if modules[m].init != nil { - if err := modules[m].init(t); err != nil { - return errors.Wrap(err, fmt.Sprintf("error initialising module: %s", m)) + if serv != nil { + servicesMap[n] = serv } } - return nil -} -// Run starts Loki running, and blocks until a signal is received. -func (t *Loki) Run() error { - return t.server.Run() + return servicesMap, nil } -// Stop gracefully stops a Loki. -func (t *Loki) Stop() error { - t.stopping(t.cfg.Target) - t.stop(t.cfg.Target) - t.server.Shutdown() - return nil -} +// Run starts Loki running, and blocks until a Loki stops. +func (t *Loki) Run() error { + // get all services, create service manager and tell it to start + var servs []services.Service + for _, s := range t.serviceMap { + servs = append(servs, s) + } -func (t *Loki) stop(m moduleName) { - t.stopModule(m) - deps := orderedDeps(m) - // iterate over our deps in reverse order and call stopModule - for i := len(deps) - 1; i >= 0; i-- { - t.stopModule(deps[i]) + sm, err := services.NewManager(servs...) + if err != nil { + return err } -} -func (t *Loki) stopModule(m moduleName) { - level.Info(util.Logger).Log("msg", "stopping", "module", m) - if modules[m].stop != nil { - if err := modules[m].stop(t); err != nil { - level.Error(util.Logger).Log("msg", "error stopping", "module", m, "err", err) + // before starting servers, register /ready handler. It should reflect entire Loki. + t.server.HTTP.Path("/ready").Handler(t.readyHandler(sm)) + + // Let's listen for events from this manager, and log them. + healthy := func() { level.Info(util.Logger).Log("msg", "Loki started") } + stopped := func() { level.Info(util.Logger).Log("msg", "Loki stopped") } + serviceFailed := func(service services.Service) { + // if any service fails, stop entire Loki + sm.StopAsync() + + // let's find out which module failed + for m, s := range t.serviceMap { + if s == service { + if service.FailureCase() == util.ErrStopProcess { + level.Info(util.Logger).Log("msg", "received stop signal via return error", "module", m, "error", service.FailureCase()) + } else { + level.Error(util.Logger).Log("msg", "module failed", "module", m, "error", service.FailureCase()) + } + return + } } + + level.Error(util.Logger).Log("msg", "module failed", "module", "unknown", "error", service.FailureCase()) + } + + sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) + + // Currently it's the Server that reacts on signal handler, + // so get Server service, and wait until it gets to Stopping state. + // It will also be stopped via service manager if any service fails (see attached service listener) + // Attach listener before starting services, or we may miss the notification. + serverStopping := make(chan struct{}) + t.serviceMap[Server].AddListener(services.NewListener(nil, nil, func(from services.State) { + close(serverStopping) + }, nil, nil)) + + // Start all services. This can really only fail if some service is already + // in other state than New, which should not be the case. 
+ err = sm.StartAsync(context.Background()) + if err == nil { + // no error starting the services, now let's just wait until Server module + // transitions to Stopping (after SIGTERM or when some service fails), + // and then initiate shutdown + <-serverStopping } -} -func (t *Loki) stopping(m moduleName) { - t.stoppingModule(m) - deps := orderedDeps(m) - // iterate over our deps in reverse order and call stoppingModule - for i := len(deps) - 1; i >= 0; i-- { - t.stoppingModule(deps[i]) + // Stop all the services, and wait until they are all done. + // We don't care about this error, as it cannot really fail. + _ = services.StopManagerAndAwaitStopped(context.Background(), sm) + + // if any service failed, report that as an error to caller + if err == nil { + if failed := sm.ServicesByState()[services.Failed]; len(failed) > 0 { + for _, f := range failed { + if f.FailureCase() != util.ErrStopProcess { + // Details were reported via failure listener before + err = errors.New("failed services") + break + } + } + } } + return err } -func (t *Loki) stoppingModule(m moduleName) { - level.Info(util.Logger).Log("msg", "notifying module about stopping", "module", m) - if modules[m].stopping != nil { - if err := modules[m].stopping(t); err != nil { - level.Error(util.Logger).Log("msg", "error stopping", "module", m, "err", err) +func (t *Loki) readyHandler(sm *services.Manager) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !sm.IsHealthy() { + msg := bytes.Buffer{} + msg.WriteString("Some services are not Running:\n") + + byState := sm.ServicesByState() + for st, ls := range byState { + msg.WriteString(fmt.Sprintf("%v: %d\n", st, len(ls))) + } + + http.Error(w, msg.String(), http.StatusServiceUnavailable) + return + } + + // Ingester has a special check that makes sure that it was able to register into the ring, + // and that all other ring entries are OK too. + if t.ingester != nil { + if err := t.ingester.CheckReady(r.Context()); err != nil { + http.Error(w, "Ingester not ready: "+err.Error(), http.StatusServiceUnavailable) + return + } } + + http.Error(w, "ready", http.StatusOK) } } diff --git a/pkg/loki/module_service_wrapper.go b/pkg/loki/module_service_wrapper.go new file mode 100644 index 000000000000..1b6d8c31e985 --- /dev/null +++ b/pkg/loki/module_service_wrapper.go @@ -0,0 +1,30 @@ +package loki + +import ( + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +// This function wraps module service, and adds waiting for dependencies to start before starting, +// and dependant modules to stop before stopping this module service. 
+func newModuleServiceWrapper(serviceMap map[moduleName]services.Service, mod moduleName, modServ services.Service, startDeps []moduleName, stopDeps []moduleName) services.Service { + getDeps := func(deps []moduleName) map[string]services.Service { + r := map[string]services.Service{} + for _, m := range deps { + s := serviceMap[m] + if s != nil { + r[string(m)] = s + } + } + return r + } + + return util.NewModuleService(string(mod), modServ, + func(_ string) map[string]services.Service { + return getDeps(startDeps) + }, + func(_ string) map[string]services.Service { + return getDeps(stopDeps) + }, + ) +} diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 3c98f2d71133..2f3504eed1a8 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1,7 +1,6 @@ package loki import ( - "context" "fmt" "net/http" "os" @@ -11,6 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/storage" + "github.com/cortexproject/cortex/pkg/cortex" "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" @@ -79,31 +79,43 @@ func (m *moduleName) Set(s string) error { return nil } -func (t *Loki) initServer() (err error) { - t.server, err = server.New(t.cfg.Server) - return +func (t *Loki) initServer() (services.Service, error) { + serv, err := server.New(t.cfg.Server) + if err != nil { + return nil, err + } + + t.server = serv + + servicesToWaitFor := func() []services.Service { + svs := []services.Service(nil) + for m, s := range t.serviceMap { + // Server should not wait for itself. + if m != Server { + svs = append(svs, s) + } + } + return svs + } + + s := cortex.NewServerService(t.server, servicesToWaitFor) + + return s, nil } -func (t *Loki) initRing() (err error) { +func (t *Loki) initRing() (_ services.Service, err error) { t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV t.ring, err = ring.New(t.cfg.Ingester.LifecyclerConfig.RingConfig, "ingester", ring.IngesterRingKey) - if err == nil { - err = services.StartAndAwaitRunning(context.Background(), t.ring) - } if err != nil { return } prometheus.MustRegister(t.ring) t.server.HTTP.Handle("/ring", t.ring) - return -} - -func (t *Loki) stopRing() (err error) { - return services.StopAndAwaitTerminated(context.Background(), t.ring) + return t.ring, nil } -func (t *Loki) initRuntimeConfig() (err error) { +func (t *Loki) initRuntimeConfig() (services.Service, error) { if t.cfg.RuntimeConfig.LoadPath == "" { t.cfg.RuntimeConfig.LoadPath = t.cfg.LimitsConfig.PerTenantOverrideConfig t.cfg.RuntimeConfig.ReloadPeriod = t.cfg.LimitsConfig.PerTenantOverridePeriod @@ -113,71 +125,51 @@ func (t *Loki) initRuntimeConfig() (err error) { // make sure to set default limits before we start loading configuration into memory validation.SetDefaultLimitsForYAMLUnmarshalling(t.cfg.LimitsConfig) + var err error t.runtimeConfig, err = runtimeconfig.NewRuntimeConfigManager(t.cfg.RuntimeConfig, prometheus.DefaultRegisterer) - if err == nil { - err = services.StartAndAwaitRunning(context.Background(), t.runtimeConfig) - } - return err + return t.runtimeConfig, err } -func (t *Loki) stopRuntimeConfig() (err error) { - return services.StopAndAwaitTerminated(context.Background(), t.runtimeConfig) -} - -func (t *Loki) initOverrides() (err error) { +func (t *Loki) 
initOverrides() (_ services.Service, err error) { t.overrides, err = validation.NewOverrides(t.cfg.LimitsConfig, tenantLimitsFromRuntimeConfig(t.runtimeConfig)) - return err + // overrides are not a service, since they don't have any operational state. + return nil, err } -func (t *Loki) initDistributor() (err error) { +func (t *Loki) initDistributor() (services.Service, error) { t.cfg.Distributor.DistributorRing.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV + var err error t.distributor, err = distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring, t.overrides) if err != nil { - return + return nil, err } pushHandler := middleware.Merge( t.httpAuthMiddleware, ).Wrap(http.HandlerFunc(t.distributor.PushHandler)) - t.server.HTTP.Path("/ready").Handler(http.HandlerFunc(t.distributor.ReadinessHandler)) - t.server.HTTP.Handle("/api/prom/push", pushHandler) t.server.HTTP.Handle("/loki/api/v1/push", pushHandler) - return + return t.distributor, nil } -func (t *Loki) stopDistributor() (err error) { - t.distributor.Stop() - return nil -} - -func (t *Loki) initQuerier() error { +func (t *Loki) initQuerier() (services.Service, error) { level.Debug(util.Logger).Log("msg", "initializing querier worker", "config", fmt.Sprintf("%+v", t.cfg.Worker)) worker, err := frontend.NewWorker(t.cfg.Worker, httpgrpc_server.NewServer(t.server.HTTPServer.Handler), util.Logger) if err != nil { - return err - } - // worker is nil, if no address is defined. - if worker != nil { - err = services.StartAndAwaitRunning(context.Background(), worker) - if err != nil { - return err - } + return nil, err } t.querier, err = querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring, t.store, t.overrides) if err != nil { - return err + return nil, err } httpMiddleware := middleware.Merge( t.httpAuthMiddleware, querier.NewPrepopulateMiddleware(), ) - t.server.HTTP.Path("/ready").Handler(http.HandlerFunc(t.querier.ReadinessHandler)) - t.server.HTTP.Handle("/loki/api/v1/query_range", httpMiddleware.Wrap(http.HandlerFunc(t.querier.RangeQueryHandler))) t.server.HTTP.Handle("/loki/api/v1/query", httpMiddleware.Wrap(http.HandlerFunc(t.querier.InstantQueryHandler))) // Prometheus compatibility requires `loki/api/v1/labels` however we already released `loki/api/v1/label` @@ -193,10 +185,10 @@ func (t *Loki) initQuerier() error { t.server.HTTP.Handle("/api/prom/label/{name}/values", httpMiddleware.Wrap(http.HandlerFunc(t.querier.LabelHandler))) t.server.HTTP.Handle("/api/prom/tail", httpMiddleware.Wrap(http.HandlerFunc(t.querier.TailHandler))) t.server.HTTP.Handle("/api/prom/series", httpMiddleware.Wrap(http.HandlerFunc(t.querier.SeriesHandler))) - return nil + return worker, nil // ok if worker is nil here } -func (t *Loki) initIngester() (err error) { +func (t *Loki) initIngester() (_ services.Service, err error) { t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig) t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV t.cfg.Ingester.LifecyclerConfig.ListenPort = &t.cfg.Server.GRPCListenPort @@ -217,25 +209,14 @@ func (t *Loki) initIngester() (err error) { logproto.RegisterQuerierServer(t.server.GRPC, t.ingester) logproto.RegisterIngesterServer(t.server.GRPC, t.ingester) grpc_health_v1.RegisterHealthServer(t.server.GRPC, t.ingester) - 
t.server.HTTP.Path("/ready").Handler(http.HandlerFunc(t.ingester.ReadinessHandler)) t.server.HTTP.Path("/flush").Handler(http.HandlerFunc(t.ingester.FlushHandler)) - return + return t.ingester, nil } -func (t *Loki) stopIngester() error { - t.ingester.Shutdown() - return nil -} - -func (t *Loki) stoppingIngester() error { - t.ingester.Stopping() - return nil -} - -func (t *Loki) initTableManager() error { +func (t *Loki) initTableManager() (services.Service, error) { err := t.cfg.SchemaConfig.Load() if err != nil { - return err + return nil, err } // Assume the newest config is the one to use @@ -256,7 +237,7 @@ func (t *Loki) initTableManager() error { tableClient, err := loki_storage.NewTableClient(lastConfig.IndexType, t.cfg.StorageConfig) if err != nil { - return err + return nil, err } bucketClient, err := storage.NewBucketClient(t.cfg.StorageConfig.Config) @@ -264,30 +245,13 @@ func (t *Loki) initTableManager() error { t.tableManager, err = chunk.NewTableManager(t.cfg.TableManager, t.cfg.SchemaConfig, maxChunkAgeForTableManager, tableClient, bucketClient, prometheus.DefaultRegisterer) if err != nil { - return err + return nil, err } - if err := services.StartAndAwaitRunning(context.Background(), t.tableManager); err != nil { - return err - } - - // Once the execution reaches this point, synchronous table initialization has been - // done and the table-manager is ready to serve, so we're just returning a 200. - t.server.HTTP.Path("/ready").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - if _, err := w.Write([]byte("Ready")); err != nil { - level.Error(util.Logger).Log("msg", "error writing success message", "error", err) - } - })) - - return nil -} - -func (t *Loki) stopTableManager() error { - return services.StopAndAwaitTerminated(context.Background(), t.tableManager) + return t.tableManager, nil } -func (t *Loki) initStore() (err error) { +func (t *Loki) initStore() (_ services.Service, err error) { if activeIndexType(t.cfg.SchemaConfig) == local.BoltDBShipperType { t.cfg.StorageConfig.BoltDBShipperConfig.IngesterName = t.cfg.Ingester.LifecyclerConfig.ID switch t.cfg.Target { @@ -303,15 +267,17 @@ func (t *Loki) initStore() (err error) { } t.store, err = loki_storage.NewStore(t.cfg.StorageConfig, t.cfg.ChunkStoreConfig, t.cfg.SchemaConfig, t.overrides) - return -} + if err != nil { + return + } -func (t *Loki) stopStore() error { - t.store.Stop() - return nil + return services.NewIdleService(nil, func(_ error) error { + t.store.Stop() + return nil + }), nil } -func (t *Loki) initQueryFrontend() (err error) { +func (t *Loki) initQueryFrontend() (_ services.Service, err error) { level.Debug(util.Logger).Log("msg", "initializing query frontend", "config", fmt.Sprintf("%+v", t.cfg.Frontend)) t.frontend, err = frontend.New(t.cfg.Frontend, util.Logger, prometheus.DefaultRegisterer) if err != nil { @@ -323,7 +289,7 @@ func (t *Loki) initQueryFrontend() (err error) { ) tripperware, stopper, err := queryrange.NewTripperware(t.cfg.QueryRange, util.Logger, t.overrides, prometheus.DefaultRegisterer) if err != nil { - return err + return } t.stopper = stopper t.frontend.Wrap(tripperware) @@ -342,29 +308,26 @@ func (t *Loki) initQueryFrontend() (err error) { t.server.HTTP.Handle("/api/prom/series", frontendHandler) // fallback route t.server.HTTP.PathPrefix("/").Handler(frontendHandler) - return -} -func (t *Loki) stopQueryFrontend() error { - t.frontend.Close() - if t.stopper != nil { - t.stopper.Stop() - } - return nil + return 
services.NewIdleService(nil, func(_ error) error {
+		t.frontend.Close()
+		if t.stopper != nil {
+			t.stopper.Stop()
+		}
+		return nil
+	}), nil
 }
 
-func (t *Loki) initMemberlistKV() error {
+func (t *Loki) initMemberlistKV() (services.Service, error) {
 	t.cfg.MemberlistKV.MetricsRegisterer = prometheus.DefaultRegisterer
 	t.cfg.MemberlistKV.Codecs = []codec.Codec{
 		ring.GetCodec(),
 	}
 	t.memberlistKV = memberlist.NewKVInit(&t.cfg.MemberlistKV)
-	return nil
-}
-
-func (t *Loki) stopMemberlistKV() error {
-	t.memberlistKV.Stop()
-	return nil
+	return services.NewIdleService(nil, func(_ error) error {
+		t.memberlistKV.Stop()
+		return nil
+	}), nil
 }
 
 // listDeps recursively gets a list of dependencies for a passed moduleName
@@ -425,73 +388,84 @@ func uniqueDeps(deps []moduleName) []moduleName {
 	return result
 }
 
+// find modules in the supplied list that depend on mod
+func findInverseDependencies(mod moduleName, mods []moduleName) []moduleName {
+	result := []moduleName(nil)
+
+	for _, n := range mods {
+		for _, d := range modules[n].deps {
+			if d == mod {
+				result = append(result, n)
+				break
+			}
+		}
+	}
+
+	return result
+}
+
 type module struct {
-	deps     []moduleName
-	init     func(t *Loki) error
-	stopping func(t *Loki) error
-	stop     func(t *Loki) error
+	deps []moduleName
+
+	// service for this module (can return nil)
+	service func(t *Loki) (services.Service, error)
+
+	// service that will be wrapped into moduleServiceWrapper, to wait for dependencies to start / stop
+	// (can return nil)
+	wrappedService func(t *Loki) (services.Service, error)
 }
 
 var modules = map[moduleName]module{
 	Server: {
-		init: (*Loki).initServer,
+		service: (*Loki).initServer,
 	},
 
 	RuntimeConfig: {
-		init: (*Loki).initRuntimeConfig,
-		stop: (*Loki).stopRuntimeConfig,
+		wrappedService: (*Loki).initRuntimeConfig,
 	},
 
 	MemberlistKV: {
-		init: (*Loki).initMemberlistKV,
-		stop: (*Loki).stopMemberlistKV,
+		wrappedService: (*Loki).initMemberlistKV,
 	},
 
 	Ring: {
-		deps: []moduleName{RuntimeConfig, Server, MemberlistKV},
-		init: (*Loki).initRing,
-		stop: (*Loki).stopRing,
+		deps:           []moduleName{RuntimeConfig, Server, MemberlistKV},
+		wrappedService: (*Loki).initRing,
 	},
 
 	Overrides: {
-		deps: []moduleName{RuntimeConfig},
-		init: (*Loki).initOverrides,
+		deps:           []moduleName{RuntimeConfig},
+		wrappedService: (*Loki).initOverrides,
 	},
 
 	Distributor: {
-		deps: []moduleName{Ring, Server, Overrides},
-		init: (*Loki).initDistributor,
-		stop: (*Loki).stopDistributor,
+		deps:           []moduleName{Ring, Server, Overrides},
+		wrappedService: (*Loki).initDistributor,
	},
 
 	Store: {
-		deps: []moduleName{Overrides},
-		init: (*Loki).initStore,
-		stop: (*Loki).stopStore,
+		deps:           []moduleName{Overrides},
+		wrappedService: (*Loki).initStore,
 	},
 
 	Ingester: {
-		deps:     []moduleName{Store, Server, MemberlistKV},
-		init:     (*Loki).initIngester,
-		stop:     (*Loki).stopIngester,
-		stopping: (*Loki).stoppingIngester,
+		deps:           []moduleName{Store, Server, MemberlistKV},
+		wrappedService: (*Loki).initIngester,
 	},
 
 	Querier: {
-		deps: []moduleName{Store, Ring, Server},
-		init: (*Loki).initQuerier,
+		deps:           []moduleName{Store, Ring, Server},
+		wrappedService: (*Loki).initQuerier,
 	},
 
 	QueryFrontend: {
-		deps: []moduleName{Server, Overrides},
-		init: (*Loki).initQueryFrontend,
-		stop: (*Loki).stopQueryFrontend,
+		deps:           []moduleName{Server, Overrides},
+		wrappedService: (*Loki).initQueryFrontend,
 	},
 
 	TableManager: {
-		deps: []moduleName{Server},
-		init: (*Loki).initTableManager,
-		stop: (*Loki).stopTableManager,
+		deps:           []moduleName{Server},
+		wrappedService: (*Loki).initTableManager,
 	},
 
 	All: {
diff --git 
a/pkg/loki/status.go b/pkg/loki/status.go
new file mode 100644
index 000000000000..fea652c05f94
--- /dev/null
+++ b/pkg/loki/status.go
@@ -0,0 +1,18 @@
+package loki
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func (t *Loki) servicesHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "text/plain")
+	w.WriteHeader(200)
+
+	// TODO: this could be extended to also print sub-services, if a given service has any
+	for mod, s := range t.serviceMap {
+		if s != nil {
+			fmt.Fprintf(w, "%v => %v\n", mod, s.State())
+		}
+	}
+}
diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go
index 4c5fa670957c..343516f9d10f 100644
--- a/pkg/querier/querier.go
+++ b/pkg/querier/querier.go
@@ -6,7 +6,6 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/model"
 	"github.com/weaveworks/common/httpgrpc"
@@ -37,8 +36,6 @@ const (
 	tailerWaitEntryThrottle = time.Second / 2
 )
 
-var readinessProbeSuccess = []byte("Ready")
-
 // Config for a querier.
 type Config struct {
 	QueryTimeout    time.Duration `yaml:"query_timeout"`
@@ -99,21 +96,6 @@ type responseFromIngesters struct {
 	response interface{}
 }
 
-// ReadinessHandler is used to indicate to k8s when the querier is ready.
-// Returns 200 when the querier is ready, 500 otherwise.
-func (q *Querier) ReadinessHandler(w http.ResponseWriter, r *http.Request) {
-	_, err := q.ring.GetAll()
-	if err != nil {
-		http.Error(w, "Not ready: "+err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	w.WriteHeader(http.StatusOK)
-	if _, err := w.Write(readinessProbeSuccess); err != nil {
-		level.Error(util.Logger).Log("msg", "error writing success message", "error", err)
-	}
-}
-
 // forAllIngesters runs f, in parallel, for all ingesters
 // TODO taken from Cortex, see if we can refactor out an usable interface.
 func (q *Querier) forAllIngesters(ctx context.Context, f func(logproto.QuerierClient) (interface{}, error)) ([]responseFromIngesters, error) {
diff --git a/vendor/github.com/Masterminds/squirrel/.gitignore b/vendor/github.com/Masterminds/squirrel/.gitignore
new file mode 100644
index 000000000000..4a0699f0b7f3
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/.gitignore
@@ -0,0 +1 @@
+squirrel.test
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/squirrel/.travis.yml b/vendor/github.com/Masterminds/squirrel/.travis.yml
new file mode 100644
index 000000000000..bc6be0f814ec
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+# Setting sudo access to false will let Travis CI use containers rather than
+# VMs to run the tests.
For more details see:
+#   - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
+#   - http://docs.travis-ci.com/user/workers/standard-infrastructure/
+sudo: false
+
+install:
+  - go get
+  - go get github.com/stretchr/testify/assert
+
+notifications:
+  irc: "irc.freenode.net#masterminds"
diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE.txt b/vendor/github.com/Masterminds/squirrel/LICENSE.txt
new file mode 100644
index 000000000000..74c20a2b9702
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/LICENSE.txt
@@ -0,0 +1,23 @@
+Squirrel
+The Masterminds
+Copyright (C) 2014-2015, Lann Martin
+Copyright (C) 2015-2016, Google
+Copyright (C) 2015, Matt Farina and Matt Butcher
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/squirrel/README.md b/vendor/github.com/Masterminds/squirrel/README.md
new file mode 100644
index 000000000000..e0c4394c969c
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/README.md
@@ -0,0 +1,118 @@
+# Squirrel - fluent SQL generator for Go
+
+```go
+import "gopkg.in/Masterminds/squirrel.v1"
+```
+or if you prefer using `master` (which may be arbitrarily ahead of or behind `v1`):
+
+**NOTE:** as of Go 1.6, `go get` correctly clones the Github default branch (which is `v1` in this repo).
+```go
+import "github.com/Masterminds/squirrel"
+```
+
+[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel)
+[![Build Status](https://travis-ci.org/Masterminds/squirrel.svg?branch=v1)](https://travis-ci.org/Masterminds/squirrel)
+
+_**Note:** This project has moved from `github.com/lann/squirrel` to
+`github.com/Masterminds/squirrel`. Lann remains the architect of the
+project, but we're helping him curate._
+
+**Squirrel is not an ORM.** For an application of Squirrel, check out
+[structable, a table-struct mapper](https://github.com/technosophos/structable).
+
+
+Squirrel helps you build SQL queries from composable parts:
+
+```go
+import sq "github.com/Masterminds/squirrel"
+
+users := sq.Select("*").From("users").Join("emails USING (email_id)")
+
+active := users.Where(sq.Eq{"deleted_at": nil})
+
+sql, args, err := active.ToSql()
+
+sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL"
+```
+
+```go
+sql, args, err := sq.
+    Insert("users").Columns("name", "age").
+    Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)).
+    ToSql()
+
+sql == "INSERT INTO users (name,age) VALUES (?,?),(?,?
+ 5)" +``` + +Squirrel can also execute queries directly: + +```go +stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}}) +three_stooges := stooges.Limit(3) +rows, err := three_stooges.RunWith(db).Query() + +// Behaves like: +rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3", + "moe", "larry", "curly", "shemp") +``` + +Squirrel makes conditional query building a breeze: + +```go +if len(q) > 0 { + users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%")) +} +``` + +Squirrel wants to make your life easier: + +```go +// StmtCache caches Prepared Stmts for you +dbCache := sq.NewStmtCacher(db) + +// StatementBuilder keeps your syntax neat +mydb := sq.StatementBuilder.RunWith(dbCache) +select_users := mydb.Select("*").From("users") +``` + +Squirrel loves PostgreSQL: + +```go +psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar) + +// You use question marks for placeholders... +sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna") + +/// ...squirrel replaces them using PlaceholderFormat. +sql == "SELECT * FROM elephants WHERE name IN ($1,$2)" + + +/// You can retrieve id ... +query := sq.Insert("nodes"). + Columns("uuid", "type", "data"). + Values(node.Uuid, node.Type, node.Data). + Suffix("RETURNING \"id\""). + RunWith(m.db). + PlaceholderFormat(sq.Dollar) + +query.QueryRow().Scan(&node.id) +``` + +You can escape question mask by inserting two question marks: + +```sql +SELECT * FROM nodes WHERE meta->'format' ??| array[?,?] +``` + +will generate with the Dollar Placeholder: + +```sql +SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2] +``` + + + +## License + +Squirrel is released under the +[MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/Masterminds/squirrel/case.go b/vendor/github.com/Masterminds/squirrel/case.go new file mode 100644 index 000000000000..2eb69dd5c9d3 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/case.go @@ -0,0 +1,118 @@ +package squirrel + +import ( + "bytes" + "errors" + + "github.com/lann/builder" +) + +func init() { + builder.Register(CaseBuilder{}, caseData{}) +} + +// sqlizerBuffer is a helper that allows to write many Sqlizers one by one +// without constant checks for errors that may come from Sqlizer +type sqlizerBuffer struct { + bytes.Buffer + args []interface{} + err error +} + +// WriteSql converts Sqlizer to SQL strings and writes it to buffer +func (b *sqlizerBuffer) WriteSql(item Sqlizer) { + if b.err != nil { + return + } + + var str string + var args []interface{} + str, args, b.err = item.ToSql() + + if b.err != nil { + return + } + + b.WriteString(str) + b.WriteByte(' ') + b.args = append(b.args, args...) +} + +func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) { + return b.String(), b.args, b.err +} + +// whenPart is a helper structure to describe SQLs "WHEN ... THEN ..." 
expression
+type whenPart struct {
+	when Sqlizer
+	then Sqlizer
+}
+
+func newWhenPart(when interface{}, then interface{}) whenPart {
+	return whenPart{newPart(when), newPart(then)}
+}
+
+// caseData holds all the data required to build a CASE SQL construct
+type caseData struct {
+	What      Sqlizer
+	WhenParts []whenPart
+	Else      Sqlizer
+}
+
+// ToSql implements Sqlizer
+func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) {
+	if len(d.WhenParts) == 0 {
+		err = errors.New("case expression must contain at least one WHEN clause")
+
+		return
+	}
+
+	sql := sqlizerBuffer{}
+
+	sql.WriteString("CASE ")
+	if d.What != nil {
+		sql.WriteSql(d.What)
+	}
+
+	for _, p := range d.WhenParts {
+		sql.WriteString("WHEN ")
+		sql.WriteSql(p.when)
+		sql.WriteString("THEN ")
+		sql.WriteSql(p.then)
+	}
+
+	if d.Else != nil {
+		sql.WriteString("ELSE ")
+		sql.WriteSql(d.Else)
+	}
+
+	sql.WriteString("END")
+
+	return sql.ToSql()
+}
+
+// CaseBuilder builds a SQL CASE construct which can be used as part of a query.
+type CaseBuilder builder.Builder
+
+// ToSql builds the query into a SQL string and bound args.
+func (b CaseBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(caseData)
+	return data.ToSql()
+}
+
+// what sets optional value for CASE construct "CASE [value] ..."
+func (b CaseBuilder) what(expr interface{}) CaseBuilder {
+	return builder.Set(b, "What", newPart(expr)).(CaseBuilder)
+}
+
+// When adds "WHEN ... THEN ..." part to CASE construct
+func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder {
+	// TODO: performance hint: replace slice of WhenPart with just slice of parts
+	// where even indices of the slice belong to "when"s and odd indices belong to "then"s
+	return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder)
+}
+
+// Else sets optional "ELSE ..."
part for CASE construct +func (b CaseBuilder) Else(expr interface{}) CaseBuilder { + return builder.Set(b, "Else", newPart(expr)).(CaseBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/delete.go b/vendor/github.com/Masterminds/squirrel/delete.go new file mode 100644 index 000000000000..8aa4f1e667ba --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/delete.go @@ -0,0 +1,152 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "github.com/lann/builder" + "strings" +) + +type deleteData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + From string + WhereParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes exprs +} + +func (d *deleteData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.From) == 0 { + err = fmt.Errorf("delete statements must specify a From table") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("DELETE FROM ") + sql.WriteString(d.From) + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + + +// Builder + +// DeleteBuilder builds SQL DELETE statements. +type DeleteBuilder builder.Builder + +func init() { + builder.Register(DeleteBuilder{}, deleteData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder { + return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder { + return setRunWith(b, runner).(DeleteBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b DeleteBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.Exec() +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b DeleteBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(deleteData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(DeleteBuilder) +} + +// From sets the table to be deleted from. +func (b DeleteBuilder) From(from string) DeleteBuilder { + return builder.Set(b, "From", from).(DeleteBuilder) +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. 
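Putting the delete builder together, a short usage sketch (using the `sq` import alias from the README above; the generated SQL is noted in the comment):

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Builds: DELETE FROM users WHERE deleted_at IS NOT NULL ORDER BY id LIMIT 10
	sql, args, err := sq.Delete("users").
		Where(sq.NotEq{"deleted_at": nil}).
		OrderBy("id").
		Limit(10).
		ToSql()
	fmt.Println(sql, args, err)
}
```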
+func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
+func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder {
+	return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder)
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
+func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder {
+	return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder {
+	return builder.Append(b, "Suffixes", Expr(sql, args...)).(DeleteBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/expr.go b/vendor/github.com/Masterminds/squirrel/expr.go
new file mode 100644
index 000000000000..a8749f10d58d
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/expr.go
@@ -0,0 +1,247 @@
+package squirrel
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+)
+
+type expr struct {
+	sql  string
+	args []interface{}
+}
+
+// Expr builds value expressions for InsertBuilder and UpdateBuilder.
+//
+// Ex:
+//     .Values(Expr("FROM_UNIXTIME(?)", t))
+func Expr(sql string, args ...interface{}) expr {
+	return expr{sql: sql, args: args}
+}
+
+func (e expr) ToSql() (sql string, args []interface{}, err error) {
+	return e.sql, e.args, nil
+}
+
+type exprs []expr
+
+func (es exprs) AppendToSql(w io.Writer, sep string, args []interface{}) ([]interface{}, error) {
+	for i, e := range es {
+		if i > 0 {
+			_, err := io.WriteString(w, sep)
+			if err != nil {
+				return nil, err
+			}
+		}
+		_, err := io.WriteString(w, e.sql)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, e.args...)
+	}
+	return args, nil
+}
+
+// aliasExpr helps alias part of a SQL query generated with the underlying "expr"
+type aliasExpr struct {
+	expr  Sqlizer
+	alias string
+}
+
+// Alias allows defining an alias for a column in SelectBuilder. Useful when a column
+// is defined as a complex expression like IF or CASE
+// Ex:
+//     .Column(Alias(caseStmt, "case_column"))
+func Alias(expr Sqlizer, alias string) aliasExpr {
+	return aliasExpr{expr, alias}
+}
+
+func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) {
+	sql, args, err = e.expr.ToSql()
+	if err == nil {
+		sql = fmt.Sprintf("(%s) AS %s", sql, e.alias)
+	}
+	return
+}
+
+// Eq is syntactic sugar for use with Where/Having/Set methods.
+// Ex: +// .Where(Eq{"id": 1}) +type Eq map[string]interface{} + +func (eq Eq) toSql(useNotOpr bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + equalOpr string = "=" + inOpr string = "IN" + nullOpr string = "IS" + ) + + if useNotOpr { + equalOpr = "<>" + inOpr = "NOT IN" + nullOpr = "IS NOT" + } + + for key, val := range eq { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + expr = fmt.Sprintf("%s %s NULL", key, nullOpr) + } else { + valVal := reflect.ValueOf(val) + if valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice { + if valVal.Len() == 0 { + expr = fmt.Sprintf("%s %s (NULL)", key, inOpr) + if args == nil { + args = []interface{}{} + } + } else { + for i := 0; i < valVal.Len(); i++ { + args = append(args, valVal.Index(i).Interface()) + } + expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len())) + } + } else { + expr = fmt.Sprintf("%s %s ?", key, equalOpr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (eq Eq) ToSql() (sql string, args []interface{}, err error) { + return eq.toSql(false) +} + +// NotEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(NotEq{"id": 1}) == "id <> 1" +type NotEq Eq + +func (neq NotEq) ToSql() (sql string, args []interface{}, err error) { + return Eq(neq).toSql(true) +} + +// Lt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Lt{"id": 1}) +type Lt map[string]interface{} + +func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + opr string = "<" + ) + + if opposite { + opr = ">" + } + + if orEq { + opr = fmt.Sprintf("%s%s", opr, "=") + } + + for key, val := range lt { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with less than or greater than operators") + return + } else { + valVal := reflect.ValueOf(val) + if valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice { + err = fmt.Errorf("cannot use array or slice with less than or greater than operators") + return + } else { + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lt Lt) ToSql() (sql string, args []interface{}, err error) { + return lt.toSql(false, false) +} + +// LtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(LtOrEq{"id": 1}) == "id <= 1" +type LtOrEq Lt + +func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(ltOrEq).toSql(false, true) +} + +// Gt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Gt{"id": 1}) == "id > 1" +type Gt Lt + +func (gt Gt) ToSql() (sql string, args []interface{}, err error) { + return Lt(gt).toSql(true, false) +} + +// GtOrEq is syntactic sugar for use with Where/Having/Set methods. 
+// Ex: +// .Where(GtOrEq{"id": 1}) == "id >= 1" +type GtOrEq Lt + +func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(gtOrEq).toSql(true, true) +} + +type conj []Sqlizer + +func (c conj) join(sep string) (sql string, args []interface{}, err error) { + var sqlParts []string + for _, sqlizer := range c { + partSql, partArgs, err := sqlizer.ToSql() + if err != nil { + return "", nil, err + } + if partSql != "" { + sqlParts = append(sqlParts, partSql) + args = append(args, partArgs...) + } + } + if len(sqlParts) > 0 { + sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep)) + } + return +} + +type And conj + +func (a And) ToSql() (string, []interface{}, error) { + return conj(a).join(" AND ") +} + +type Or conj + +func (o Or) ToSql() (string, []interface{}, error) { + return conj(o).join(" OR ") +} diff --git a/vendor/github.com/Masterminds/squirrel/insert.go b/vendor/github.com/Masterminds/squirrel/insert.go new file mode 100644 index 000000000000..f08025f500cc --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert.go @@ -0,0 +1,207 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "github.com/lann/builder" + "strings" +) + +type insertData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + Options []string + Into string + Columns []string + Values [][]interface{} + Suffixes exprs +} + +func (d *insertData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *insertData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *insertData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Into) == 0 { + err = fmt.Errorf("insert statements must specify a table") + return + } + if len(d.Values) == 0 { + err = fmt.Errorf("insert statements must have at least one set of values") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("INSERT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + sql.WriteString("INTO ") + sql.WriteString(d.Into) + sql.WriteString(" ") + + if len(d.Columns) > 0 { + sql.WriteString("(") + sql.WriteString(strings.Join(d.Columns, ",")) + sql.WriteString(") ") + } + + sql.WriteString("VALUES ") + + valuesStrings := make([]string, len(d.Values)) + for r, row := range d.Values { + valueStrings := make([]string, len(row)) + for v, val := range row { + e, isExpr := val.(expr) + if isExpr { + valueStrings[v] = e.sql + args = append(args, e.args...) + } else { + valueStrings[v] = "?" + args = append(args, val) + } + } + valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) + } + sql.WriteString(strings.Join(valuesStrings, ",")) + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// InsertBuilder builds SQL INSERT statements. 
+type InsertBuilder builder.Builder + +func init() { + builder.Register(InsertBuilder{}, insertData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder { + return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder { + return setRunWith(b, runner).(InsertBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b InsertBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b InsertBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b InsertBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b InsertBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(insertData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(InsertBuilder) +} + +// Options adds keyword options before the INTO clause of the query. +func (b InsertBuilder) Options(options ...string) InsertBuilder { + return builder.Extend(b, "Options", options).(InsertBuilder) +} + +// Into sets the INTO clause of the query. +func (b InsertBuilder) Into(from string) InsertBuilder { + return builder.Set(b, "Into", from).(InsertBuilder) +} + +// Columns adds insert columns to the query. +func (b InsertBuilder) Columns(columns ...string) InsertBuilder { + return builder.Extend(b, "Columns", columns).(InsertBuilder) +} + +// Values adds a single row's values to the query. 
+func (b InsertBuilder) Values(values ...interface{}) InsertBuilder {
+	return builder.Append(b, "Values", values).(InsertBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder {
+	return builder.Append(b, "Suffixes", Expr(sql, args...)).(InsertBuilder)
+}
+
+// SetMap sets columns and values for the insert builder from a map of column names to values.
+// Note that it will reset all previous columns and values that were set, if any.
+func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder {
+	cols := make([]string, 0, len(clauses))
+	vals := make([]interface{}, 0, len(clauses))
+	for col, val := range clauses {
+		cols = append(cols, col)
+		vals = append(vals, val)
+	}
+
+	b = builder.Set(b, "Columns", cols).(InsertBuilder)
+	b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder)
+	return b
+}
diff --git a/vendor/github.com/Masterminds/squirrel/part.go b/vendor/github.com/Masterminds/squirrel/part.go
new file mode 100644
index 000000000000..2926d0315135
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/part.go
@@ -0,0 +1,55 @@
+package squirrel
+
+import (
+	"fmt"
+	"io"
+)
+
+type part struct {
+	pred interface{}
+	args []interface{}
+}
+
+func newPart(pred interface{}, args ...interface{}) Sqlizer {
+	return &part{pred, args}
+}
+
+func (p part) ToSql() (sql string, args []interface{}, err error) {
+	switch pred := p.pred.(type) {
+	case nil:
+		// no-op
+	case Sqlizer:
+		sql, args, err = pred.ToSql()
+	case string:
+		sql = pred
+		args = p.args
+	default:
+		err = fmt.Errorf("expected string or Sqlizer, not %T", pred)
+	}
+	return
+}
+
+func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) {
+	for i, p := range parts {
+		partSql, partArgs, err := p.ToSql()
+		if err != nil {
+			return nil, err
+		} else if len(partSql) == 0 {
+			continue
+		}
+
+		if i > 0 {
+			_, err := io.WriteString(w, sep)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		_, err = io.WriteString(w, partSql)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, partArgs...)
+	}
+	return args, nil
+}
diff --git a/vendor/github.com/Masterminds/squirrel/placeholder.go b/vendor/github.com/Masterminds/squirrel/placeholder.go
new file mode 100644
index 000000000000..d377788b95db
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/placeholder.go
@@ -0,0 +1,70 @@
+package squirrel
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method.
+//
+// ReplacePlaceholders takes a SQL statement and replaces each question mark
+// placeholder with a (possibly different) SQL placeholder.
+type PlaceholderFormat interface {
+	ReplacePlaceholders(sql string) (string, error)
+}
+
+var (
+	// Question is a PlaceholderFormat instance that leaves placeholders as
+	// question marks.
+	Question = questionFormat{}
+
+	// Dollar is a PlaceholderFormat instance that replaces placeholders with
+	// dollar-prefixed positional placeholders (e.g. $1, $2, $3).
+	Dollar = dollarFormat{}
+)
+
+type questionFormat struct{}
+
+func (_ questionFormat) ReplacePlaceholders(sql string) (string, error) {
+	return sql, nil
+}
+
+type dollarFormat struct{}
+
+func (_ dollarFormat) ReplacePlaceholders(sql string) (string, error) {
+	buf := &bytes.Buffer{}
+	i := 0
+	for {
+		p := strings.Index(sql, "?")
+		if p == -1 {
+			break
+		}
+
+		if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ?
+ buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + i++ + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "$%d", i) + sql = sql[p+1:] + } + } + + buf.WriteString(sql) + return buf.String(), nil +} + +// Placeholders returns a string with count ? placeholders joined with commas. +func Placeholders(count int) string { + if count < 1 { + return "" + } + + return strings.Repeat(",?", count)[1:] +} diff --git a/vendor/github.com/Masterminds/squirrel/row.go b/vendor/github.com/Masterminds/squirrel/row.go new file mode 100644 index 000000000000..74ffda92bde9 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/row.go @@ -0,0 +1,22 @@ +package squirrel + +// RowScanner is the interface that wraps the Scan method. +// +// Scan behaves like database/sql.Row.Scan. +type RowScanner interface { + Scan(...interface{}) error +} + +// Row wraps database/sql.Row to let squirrel return new errors on Scan. +type Row struct { + RowScanner + err error +} + +// Scan returns Row.err or calls RowScanner.Scan. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + return r.RowScanner.Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go new file mode 100644 index 000000000000..7dc09bc5a4d1 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/select.go @@ -0,0 +1,313 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type selectData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + Options []string + Columns []Sqlizer + From Sqlizer + Joins []Sqlizer + WhereParts []Sqlizer + GroupBys []string + HavingParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes exprs +} + +func (d *selectData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *selectData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *selectData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Columns) == 0 { + err = fmt.Errorf("select statements must have at least one result column") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("SELECT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + if len(d.Columns) > 0 { + args, err = appendToSql(d.Columns, sql, ", ", args) + if err != nil { + return + } + } + + if d.From != nil { + sql.WriteString(" FROM ") + args, err = appendToSql([]Sqlizer{d.From}, sql, "", args) + if err != nil { + return + } + } + + if len(d.Joins) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Joins, sql, " ", args) + if err != nil { + return + } + } + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.GroupBys) > 0 { + sql.WriteString(" GROUP BY ") + sql.WriteString(strings.Join(d.GroupBys, ", ")) + } + + if 
len(d.HavingParts) > 0 { + sql.WriteString(" HAVING ") + args, err = appendToSql(d.HavingParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// SelectBuilder builds SQL SELECT statements. +type SelectBuilder builder.Builder + +func init() { + builder.Register(SelectBuilder{}, selectData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder { + return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder { + return setRunWith(b, runner).(SelectBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b SelectBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(selectData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b SelectBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(selectData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b SelectBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(selectData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b SelectBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b SelectBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(selectData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(SelectBuilder) +} + +// Distinct adds a DISTINCT clause to the query. +func (b SelectBuilder) Distinct() SelectBuilder { + return b.Options("DISTINCT") +} + +// Options adds select option to the query +func (b SelectBuilder) Options(options ...string) SelectBuilder { + return builder.Extend(b, "Options", options).(SelectBuilder) +} + +// Columns adds result columns to the query. +func (b SelectBuilder) Columns(columns ...string) SelectBuilder { + var parts []interface{} + for _, str := range columns { + parts = append(parts, newPart(str)) + } + return builder.Extend(b, "Columns", parts).(SelectBuilder) +} + +// Column adds a result column to the query. +// Unlike Columns, Column accepts args which will be bound to placeholders in +// the columns string, for example: +// Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3) +func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder) +} + +// From sets the FROM clause of the query. 
+func (b SelectBuilder) From(from string) SelectBuilder {
+	return builder.Set(b, "From", newPart(from)).(SelectBuilder)
+}
+
+// FromSelect sets a subquery into the FROM clause of the query.
+func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder {
+	return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder)
+}
+
+// JoinClause adds a join clause to the query.
+func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder)
+}
+
+// Join adds a JOIN clause to the query.
+func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("JOIN "+join, rest...)
+}
+
+// LeftJoin adds a LEFT JOIN clause to the query.
+func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("LEFT JOIN "+join, rest...)
+}
+
+// RightJoin adds a RIGHT JOIN clause to the query.
+func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("RIGHT JOIN "+join, rest...)
+}
+
+// Where adds an expression to the WHERE clause of the query.
+//
+// Expressions are ANDed together in the generated SQL.
+//
+// Where accepts several types for its pred argument:
+//
+// nil OR "" - ignored.
+//
+// string - SQL expression.
+// If the expression has SQL placeholders then a set of arguments must be passed
+// as well, one for each placeholder.
+//
+// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is
+// transformed into an expression like "<key> = ?", with the corresponding value
+// bound to the placeholder. If the value is nil, the expression will be "<key>
+// IS NULL". If the value is an array or slice, the expression will be "<key> IN
+// (?,?,...)", with one placeholder for each item in the value. These expressions
+// are ANDed together.
+//
+// Where will panic if pred isn't any of the above types.
+func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder)
+}
+
+// GroupBy adds GROUP BY expressions to the query.
+func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder {
+	return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder)
+}
+
+// Having adds an expression to the HAVING clause of the query.
+//
+// See Where.
+func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder {
+	return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
+func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder {
+	return builder.Extend(b, "OrderBys", orderBys).(SelectBuilder)
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b SelectBuilder) Limit(limit uint64) SelectBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
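The Eq-map rules described above are easiest to see with a concrete call; a small sketch (the order of the WHERE clauses can vary because Go randomises map iteration):

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// nil becomes IS NULL; a slice becomes IN with one placeholder per element.
	sql, args, _ := sq.Select("*").
		From("users").
		Where(sq.Eq{"deleted_at": nil, "id": []int{1, 2, 3}}).
		ToSql()

	fmt.Println(sql)  // e.g. SELECT * FROM users WHERE deleted_at IS NULL AND id IN (?,?,?)
	fmt.Println(args) // [1 2 3]
}
```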
+func (b SelectBuilder) Offset(offset uint64) SelectBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder) +} + +// Suffix adds an expression to the end of the query +func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder { + return builder.Append(b, "Suffixes", Expr(sql, args...)).(SelectBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel.go b/vendor/github.com/Masterminds/squirrel/squirrel.go new file mode 100644 index 000000000000..89aaf3dcfc4b --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/squirrel.go @@ -0,0 +1,166 @@ +// Package squirrel provides a fluent SQL generator. +// +// See https://github.com/lann/squirrel for examples. +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +// Sqlizer is the interface that wraps the ToSql method. +// +// ToSql returns a SQL representation of the Sqlizer, along with a slice of args +// as passed to e.g. database/sql.Exec. It can also return an error. +type Sqlizer interface { + ToSql() (string, []interface{}, error) +} + +// Execer is the interface that wraps the Exec method. +// +// Exec executes the given query as implemented by database/sql.Exec. +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Queryer is the interface that wraps the Query method. +// +// Query executes the given query as implemented by database/sql.Query. +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRower is the interface that wraps the QueryRow method. +// +// QueryRow executes the given query as implemented by database/sql.QueryRow. +type QueryRower interface { + QueryRow(query string, args ...interface{}) RowScanner +} + +// BaseRunner groups the Execer and Queryer interfaces. +type BaseRunner interface { + Execer + Queryer +} + +// Runner groups the Execer, Queryer, and QueryRower interfaces. +type Runner interface { + Execer + Queryer + QueryRower +} + +// DBRunner wraps sql.DB to implement Runner. +type dbRunner struct { + *sql.DB +} + +func (r *dbRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.DB.QueryRow(query, args...) +} + +type txRunner struct { + *sql.Tx +} + +func (r *txRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.Tx.QueryRow(query, args...) +} + +func setRunWith(b interface{}, baseRunner BaseRunner) interface{} { + var runner Runner + switch r := baseRunner.(type) { + case Runner: + runner = r + case *sql.DB: + runner = &dbRunner{r} + case *sql.Tx: + runner = &txRunner{r} + } + return builder.Set(b, "RunWith", runner) +} + +// RunnerNotSet is returned by methods that need a Runner if it isn't set. +var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)") + +// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower. +var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower") + +// ExecWith Execs the SQL returned by s with db. +func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Exec(query, args...) +} + +// QueryWith Querys the SQL returned by s with db. +func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Query(query, args...) +} + +// QueryRowWith QueryRows the SQL returned by s with db. 
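These runner interfaces are what let a builder execute itself once RunWith is set; a usage sketch with database/sql (the Postgres driver import and the DSN are placeholders, not implied by this patch):

```go
package main

import (
	"database/sql"
	"log"

	sq "github.com/Masterminds/squirrel"
	_ "github.com/lib/pq" // placeholder driver
)

func main() {
	db, err := sql.Open("postgres", "dsn-goes-here") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	// setRunWith recognises *sql.DB and wraps it so that QueryRow works too.
	var id int
	var name string
	err = sq.Select("id", "name").
		From("users").
		Where(sq.Eq{"id": 1}).
		RunWith(db).
		QueryRow().
		Scan(&id, &name)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(id, name)
}
```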
+func QueryRowWith(db QueryRower, s Sqlizer) RowScanner { + query, args, err := s.ToSql() + return &Row{RowScanner: db.QueryRow(query, args...), err: err} +} + +// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed +// +// If ToSql returns an error, the result of this method will look like: +// "[ToSql error: %s]" or "[DebugSqlizer error: %s]" +// +// IMPORTANT: As its name suggests, this function should only be used for +// debugging. While the string result *might* be valid SQL, this function does +// not try very hard to ensure it. Additionally, executing the output of this +// function with any untrusted user input is certainly insecure. +func DebugSqlizer(s Sqlizer) string { + sql, args, err := s.ToSql() + if err != nil { + return fmt.Sprintf("[ToSql error: %s]", err) + } + + // TODO: dedupe this with placeholder.go + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, "?") + if p == -1 { + break + } + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? + buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + if i+1 > len(args) { + return fmt.Sprintf( + "[DebugSqlizer error: too many placeholders in %#v for %d args]", + sql, len(args)) + } + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "'%v'", args[i]) + sql = sql[p+1:] + i++ + } + } + if i < len(args) { + return fmt.Sprintf( + "[DebugSqlizer error: not enough placeholders in %#v for %d args]", + sql, len(args)) + } + buf.WriteString(sql) + return buf.String() +} diff --git a/vendor/github.com/Masterminds/squirrel/statement.go b/vendor/github.com/Masterminds/squirrel/statement.go new file mode 100644 index 000000000000..275388f63089 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/statement.go @@ -0,0 +1,83 @@ +package squirrel + +import "github.com/lann/builder" + +// StatementBuilderType is the type of StatementBuilder. +type StatementBuilderType builder.Builder + +// Select returns a SelectBuilder for this StatementBuilderType. +func (b StatementBuilderType) Select(columns ...string) SelectBuilder { + return SelectBuilder(b).Columns(columns...) +} + +// Insert returns a InsertBuilder for this StatementBuilderType. +func (b StatementBuilderType) Insert(into string) InsertBuilder { + return InsertBuilder(b).Into(into) +} + +// Update returns a UpdateBuilder for this StatementBuilderType. +func (b StatementBuilderType) Update(table string) UpdateBuilder { + return UpdateBuilder(b).Table(table) +} + +// Delete returns a DeleteBuilder for this StatementBuilderType. +func (b StatementBuilderType) Delete(from string) DeleteBuilder { + return DeleteBuilder(b).From(from) +} + +// PlaceholderFormat sets the PlaceholderFormat field for any child builders. +func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType { + return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType) +} + +// RunWith sets the RunWith field for any child builders. +func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType { + return setRunWith(b, runner).(StatementBuilderType) +} + +// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder. +var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question) + +// Select returns a new SelectBuilder, optionally setting some result columns. +// +// See SelectBuilder.Columns. +func Select(columns ...string) SelectBuilder { + return StatementBuilder.Select(columns...) 
+}
+
+// Insert returns a new InsertBuilder with the given table name.
+//
+// See InsertBuilder.Into.
+func Insert(into string) InsertBuilder {
+    return StatementBuilder.Insert(into)
+}
+
+// Update returns a new UpdateBuilder with the given table name.
+//
+// See UpdateBuilder.Table.
+func Update(table string) UpdateBuilder {
+    return StatementBuilder.Update(table)
+}
+
+// Delete returns a new DeleteBuilder with the given table name.
+//
+// See DeleteBuilder.From.
+func Delete(from string) DeleteBuilder {
+    return StatementBuilder.Delete(from)
+}
+
+// Case returns a new CaseBuilder.
+// "what" represents the case value.
+func Case(what ...interface{}) CaseBuilder {
+    b := CaseBuilder(builder.EmptyBuilder)
+
+    switch len(what) {
+    case 0:
+    case 1:
+        b = b.what(what[0])
+    default:
+        b = b.what(newPart(what[0], what[1:]...))
+
+    }
+    return b
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher.go b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
new file mode 100644
index 000000000000..c2dc220883d3
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
@@ -0,0 +1,90 @@
+package squirrel
+
+import (
+    "database/sql"
+    "sync"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare executes the given query as implemented by database/sql.Prepare.
+type Preparer interface {
+    Prepare(query string) (*sql.Stmt, error)
+}
+
+// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces.
+type DBProxy interface {
+    Execer
+    Queryer
+    QueryRower
+    Preparer
+}
+
+type stmtCacher struct {
+    prep  Preparer
+    cache map[string]*sql.Stmt
+    mu    sync.Mutex
+}
+
+// NewStmtCacher returns a DBProxy wrapping prep that caches Prepared Stmts.
+//
+// Stmts are cached based on the string value of their queries.
+func NewStmtCacher(prep Preparer) DBProxy {
+    return &stmtCacher{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+func (sc *stmtCacher) Prepare(query string) (*sql.Stmt, error) {
+    sc.mu.Lock()
+    defer sc.mu.Unlock()
+    stmt, ok := sc.cache[query]
+    if ok {
+        return stmt, nil
+    }
+    stmt, err := sc.prep.Prepare(query)
+    if err == nil {
+        sc.cache[query] = stmt
+    }
+    return stmt, err
+}
+
+func (sc *stmtCacher) Exec(query string, args ...interface{}) (res sql.Result, err error) {
+    stmt, err := sc.Prepare(query)
+    if err != nil {
+        return
+    }
+    return stmt.Exec(args...)
+}
+
+func (sc *stmtCacher) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {
+    stmt, err := sc.Prepare(query)
+    if err != nil {
+        return
+    }
+    return stmt.Query(args...)
+}
+
+func (sc *stmtCacher) QueryRow(query string, args ...interface{}) RowScanner {
+    stmt, err := sc.Prepare(query)
+    if err != nil {
+        return &Row{err: err}
+    }
+    return stmt.QueryRow(args...)
+} + +type DBProxyBeginner interface { + DBProxy + Begin() (*sql.Tx, error) +} + +type stmtCacheProxy struct { + DBProxy + db *sql.DB +} + +func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner { + return &stmtCacheProxy{DBProxy: NewStmtCacher(db), db: db} +} + +func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) { + return sp.db.Begin() +} diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go new file mode 100644 index 000000000000..682906bc0567 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/update.go @@ -0,0 +1,232 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "sort" + "strings" + + "github.com/lann/builder" +) + +type updateData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes exprs + Table string + SetClauses []setClause + WhereParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes exprs +} + +type setClause struct { + column string + value interface{} +} + +func (d *updateData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *updateData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *updateData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Table) == 0 { + err = fmt.Errorf("update statements must specify a table") + return + } + if len(d.SetClauses) == 0 { + err = fmt.Errorf("update statements must have at least one Set clause") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, _ = d.Prefixes.AppendToSql(sql, " ", args) + sql.WriteString(" ") + } + + sql.WriteString("UPDATE ") + sql.WriteString(d.Table) + + sql.WriteString(" SET ") + setSqls := make([]string, len(d.SetClauses)) + for i, setClause := range d.SetClauses { + var valSql string + e, isExpr := setClause.value.(expr) + if isExpr { + valSql = e.sql + args = append(args, e.args...) + } else { + valSql = "?" + args = append(args, setClause.value) + } + setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql) + } + sql.WriteString(strings.Join(setSqls, ", ")) + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, _ = d.Suffixes.AppendToSql(sql, " ", args) + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// UpdateBuilder builds SQL UPDATE statements. +type UpdateBuilder builder.Builder + +func init() { + builder.Register(UpdateBuilder{}, updateData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. 
+func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder { + return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder { + return setRunWith(b, runner).(UpdateBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b UpdateBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(updateData) + return data.Exec() +} + +func (b UpdateBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(updateData) + return data.Query() +} + +func (b UpdateBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(updateData) + return data.QueryRow() +} + +func (b UpdateBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b UpdateBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(updateData) + return data.ToSql() +} + +// Prefix adds an expression to the beginning of the query +func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder { + return builder.Append(b, "Prefixes", Expr(sql, args...)).(UpdateBuilder) +} + +// Table sets the table to be updated. +func (b UpdateBuilder) Table(table string) UpdateBuilder { + return builder.Set(b, "Table", table).(UpdateBuilder) +} + +// Set adds SET clauses to the query. +func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder { + return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder) +} + +// SetMap is a convenience method which calls .Set for each key/value pair in clauses. +func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder { + keys := make([]string, len(clauses)) + i := 0 + for key := range clauses { + keys[i] = key + i++ + } + sort.Strings(keys) + for _, key := range keys { + val, _ := clauses[key] + b = b.Set(key, val) + } + return b +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. +func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder { + return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder) +} + +// Offset sets a OFFSET clause on the query. 
+func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder) +} + +// Suffix adds an expression to the end of the query +func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder { + return builder.Append(b, "Suffixes", Expr(sql, args...)).(UpdateBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/where.go b/vendor/github.com/Masterminds/squirrel/where.go new file mode 100644 index 000000000000..3a2d7b709a3f --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/where.go @@ -0,0 +1,28 @@ +package squirrel + +import ( + "fmt" +) + +type wherePart part + +func newWherePart(pred interface{}, args ...interface{}) Sqlizer { + return &wherePart{pred: pred, args: args} +} + +func (p wherePart) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case Sqlizer: + return pred.ToSql() + case map[string]interface{}: + return Eq(pred).ToSql() + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string-keyed map or string, not %T", pred) + } + return +} diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore new file mode 100644 index 000000000000..748e4c8073ce --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.gitignore @@ -0,0 +1,5 @@ +*.sublime-* +.DS_Store +*.swp +*.swo +tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml new file mode 100644 index 000000000000..cf31e6af6d50 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - "1.10.x" + - "1.11.x" + - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE new file mode 100644 index 000000000000..4b9986dea714 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
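As a quick orientation to the squirrel builders vendored above, here is a minimal usage sketch. It is illustrative only (not part of the vendored sources), and the `users` table and its columns are hypothetical.

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// UpdateBuilder renders each Set pair as "col = ?" and ANDs the WHERE
	// parts together, mirroring updateData.ToSql above.
	sql, args, err := sq.Update("users").
		Set("name", "bob").
		Where(sq.Eq{"id": 1}).
		ToSql()
	if err != nil {
		panic(err)
	}
	fmt.Println(sql, args)
	// UPDATE users SET name = ? WHERE id = ? [bob 1]
}
```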
diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md new file mode 100644 index 000000000000..07de0c498668 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -0,0 +1,188 @@ +# Purell + +Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... + +Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. + +[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) + +## Install + +`go get github.com/PuerkitoBio/purell` + +## Changelog + +* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). +* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). +* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). +* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). +* **v0.2.0** : Add benchmarks, Attempt IDN support. +* **v0.1.0** : Initial release. + +## Examples + +From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): + +```go +package purell + +import ( + "fmt" + "net/url" +) + +func ExampleNormalizeURLString() { + if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", + FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { + panic(err) + } else { + fmt.Print(normalized) + } + // Output: http://somewebsite.com:80/Amazing%3F/url/ +} + +func ExampleMustNormalizeURLString() { + normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", + FlagsUnsafeGreedy) + fmt.Print(normalized) + + // Output: http://somewebsite.com/Amazing%FA/url +} + +func ExampleNormalizeURL() { + if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { + panic(err) + } else { + normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) + fmt.Print(normalized) + } + + // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 +} +``` + +## API + +As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: + +```go +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". + + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) +``` + +For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. + +The [full godoc reference is available on gopkgdoc][godoc]. 
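+
+For example, you can build a custom set like this (an illustrative sketch; `custom` is not one of the predefined sets):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/PuerkitoBio/purell"
+)
+
+func main() {
+	// Start from the safe set, add dot-segment removal, then clear the
+	// default-port removal flag with the AND NOT operator.
+	custom := (purell.FlagsSafe | purell.FlagRemoveDotSegments) &^ purell.FlagRemoveDefaultPort
+	fmt.Println(purell.MustNormalizeURLString("HTTP://Example.com:80/a/./b", custom))
+	// Output: http://example.com:80/a/b
+}
+```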
+
+Some things to note:
+
+* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
+
+* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
+    - %24 -> $
+    - %26 -> &
+    - %2B-%3B -> +,-./0123456789:;
+    - %3D -> =
+    - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
+    - %5F -> _
+    - %61-%7A -> abcdefghijklmnopqrstuvwxyz
+    - %7E -> ~
+
+
+* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
+
+* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
+
+* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
+
+### Safe vs Usually Safe vs Unsafe
+
+Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
+
+Consider the following URL:
+
+`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+Normalizing with the `FlagsSafe` gives:
+
+`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+With the `FlagsUsuallySafeGreedy`:
+
+`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
+
+And with `FlagsUnsafeGreedy`:
+
+`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
+
+## TODOs
+
+* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
+
+## Thanks / Contributions
+
+@rogpeppe
+@jehiah
+@opennota
+@pchristopher1275
+@zenovich
+@beeker1121
+
+## License
+
+The [BSD 3-Clause license][bsd].
+
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[wiki]: http://en.wikipedia.org/wiki/URL_normalization
+[rfc]: http://tools.ietf.org/html/rfc3986#section-6
+[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
+[pr5]: https://github.com/PuerkitoBio/purell/pull/5
+[iss7]: https://github.com/PuerkitoBio/purell/issues/7
diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go
new file mode 100644
index 000000000000..6d0fc190a188
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/purell.go
@@ -0,0 +1,379 @@
+/*
+Package purell offers URL normalization as described on the wikipedia page:
+http://en.wikipedia.org/wiki/URL_normalization
+*/
+package purell
+
+import (
+    "bytes"
+    "fmt"
+    "net/url"
+    "regexp"
+    "sort"
+    "strconv"
+    "strings"
+
+    "github.com/PuerkitoBio/urlesc"
+    "golang.org/x/net/idna"
+    "golang.org/x/text/unicode/norm"
+    "golang.org/x/text/width"
+)
+
+// A set of normalization flags determines how a URL will
+// be normalized.
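+// Individual flags are bit values: combine them with the bitwise OR
+// operator, as the convenience sets (FlagsSafe, FlagsUsuallySafe*,
+// FlagsUnsafe*, FlagsAll*) defined below do.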
+type NormalizationFlags uint + +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
+ + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) +var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) +var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) +var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) +var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) +var rxEmptyPort = regexp.MustCompile(`:+$`) + +// Map of flags to implementation function. +// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically +// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. + +// Since maps have undefined traversing order, make a slice of ordered keys +var flagsOrder = []NormalizationFlags{ + FlagLowercaseScheme, + FlagLowercaseHost, + FlagRemoveDefaultPort, + FlagRemoveDirectoryIndex, + FlagRemoveDotSegments, + FlagRemoveFragment, + FlagForceHTTP, // Must be after remove default port (because https=443/http=80) + FlagRemoveDuplicateSlashes, + FlagRemoveWWW, + FlagAddWWW, + FlagSortQuery, + FlagDecodeDWORDHost, + FlagDecodeOctalHost, + FlagDecodeHexHost, + FlagRemoveUnnecessaryHostDots, + FlagRemoveEmptyPortSeparator, + FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last + FlagAddTrailingSlash, +} + +// ... 
and then the map, where order is unimportant +var flags = map[NormalizationFlags]func(*url.URL){ + FlagLowercaseScheme: lowercaseScheme, + FlagLowercaseHost: lowercaseHost, + FlagRemoveDefaultPort: removeDefaultPort, + FlagRemoveDirectoryIndex: removeDirectoryIndex, + FlagRemoveDotSegments: removeDotSegments, + FlagRemoveFragment: removeFragment, + FlagForceHTTP: forceHTTP, + FlagRemoveDuplicateSlashes: removeDuplicateSlashes, + FlagRemoveWWW: removeWWW, + FlagAddWWW: addWWW, + FlagSortQuery: sortQuery, + FlagDecodeDWORDHost: decodeDWORDHost, + FlagDecodeOctalHost: decodeOctalHost, + FlagDecodeHexHost: decodeHexHost, + FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, + FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, + FlagRemoveTrailingSlash: removeTrailingSlash, + FlagAddTrailingSlash: addTrailingSlash, +} + +// MustNormalizeURLString returns the normalized string, and panics if an error occurs. +// It takes an URL string as input, as well as the normalization flags. +func MustNormalizeURLString(u string, f NormalizationFlags) string { + result, e := NormalizeURLString(u, f) + if e != nil { + panic(e) + } + return result +} + +// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. +// It takes an URL string as input, as well as the normalization flags. +func NormalizeURLString(u string, f NormalizationFlags) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", err + } + + if f&FlagLowercaseHost == FlagLowercaseHost { + parsed.Host = strings.ToLower(parsed.Host) + } + + // The idna package doesn't fully conform to RFC 5895 + // (https://tools.ietf.org/html/rfc5895), so we do it here. + // Taken from Go 1.8 cycle source, courtesy of bradfitz. + // TODO: Remove when (if?) idna package conforms to RFC 5895. + parsed.Host = width.Fold.String(parsed.Host) + parsed.Host = norm.NFC.String(parsed.Host) + if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { + return "", err + } + + return NormalizeURL(parsed, f), nil +} + +// NormalizeURL returns the normalized string. +// It takes a parsed URL object as input, as well as the normalization flags. +func NormalizeURL(u *url.URL, f NormalizationFlags) string { + for _, k := range flagsOrder { + if f&k == k { + flags[k](u) + } + } + return urlesc.Escape(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if strings.HasSuffix(u.Path, "/") { + u.Path = u.Path[:l-1] + } + } else if l = len(u.Host); l > 0 { + if strings.HasSuffix(u.Host, "/") { + u.Host = u.Host[:l-1] + } + } +} + +func addTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } else if l = len(u.Host); l > 0 { + if !strings.HasSuffix(u.Host, "/") { + u.Host += "/" + } + } +} + +func removeDotSegments(u *url.URL) { + if len(u.Path) > 0 { + var dotFree []string + var lastIsDot bool + + sections := strings.Split(u.Path, "/") + for _, s := range sections { + if s == ".." 
{ + if len(dotFree) > 0 { + dotFree = dotFree[:len(dotFree)-1] + } + } else if s != "." { + dotFree = append(dotFree, s) + } + lastIsDot = (s == "." || s == "..") + } + // Special case if host does not end with / and new path does not begin with / + u.Path = strings.Join(dotFree, "/") + if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + // Special case if the last segment was a dot, make sure the path ends with a slash + if lastIsDot && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } +} + +func removeDirectoryIndex(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") + } +} + +func removeFragment(u *url.URL) { + u.Fragment = "" +} + +func forceHTTP(u *url.URL) { + if strings.ToLower(u.Scheme) == "https" { + u.Scheme = "http" + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} + +func removeWWW(u *url.URL) { + if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = u.Host[4:] + } +} + +func addWWW(u *url.URL) { + if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = "www." + u.Host + } +} + +func sortQuery(u *url.URL) { + q := u.Query() + + if len(q) > 0 { + arKeys := make([]string, len(q)) + i := 0 + for k := range q { + arKeys[i] = k + i++ + } + sort.Strings(arKeys) + buf := new(bytes.Buffer) + for _, k := range arKeys { + sort.Strings(q[k]) + for _, v := range q[k] { + if buf.Len() > 0 { + buf.WriteRune('&') + } + buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + } + } + + // Rebuild the raw query string + u.RawQuery = buf.String() + } +} + +func decodeDWORDHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { + var parts [4]int64 + + dword, _ := strconv.ParseInt(matches[1], 10, 0) + for i, shift := range []uint{24, 16, 8, 0} { + parts[i] = dword >> shift & 0xFF + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) + } + } +} + +func decodeOctalHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { + var parts [4]int64 + + for i := 1; i <= 4; i++ { + parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) + } + } +} + +func decodeHexHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { + // Conversion is safe because of regex validation + parsed, _ := strconv.ParseInt(matches[1], 16, 0) + // Set host as DWORD (base 10) encoded host + u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) + // The rest is the same as decoding a DWORD host + decodeDWORDHost(u) + } + } +} + +func removeUnncessaryHostDots(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { + // Trim the leading and trailing dots + u.Host = strings.Trim(matches[1], ".") + if len(matches) > 2 { + u.Host += matches[2] + } + } + } +} + +func removeEmptyPortSeparator(u *url.URL) { + if len(u.Host) > 0 { + u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") + } +} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml new file mode 100644 index 000000000000..ba6b225f91e2 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml 
@@ -0,0 +1,15 @@ +language: go + +go: + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - tip + +install: + - go build . + +script: + - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE new file mode 100644 index 000000000000..74487567632c --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md new file mode 100644 index 000000000000..57aff0a5396d --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/README.md @@ -0,0 +1,16 @@ +urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) +====== + +Package urlesc implements query escaping as per RFC 3986. + +It contains some parts of the net/url package, modified so as to allow +some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). + +## Install + + go get github.com/PuerkitoBio/urlesc + +## License + +Go license (BSD-3-Clause) + diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go new file mode 100644 index 000000000000..1b84624594d0 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package urlesc implements query escaping as per RFC 3986. +// It contains some parts of the net/url package, modified so as to allow +// some reserved characters incorrectly escaped by net/url. 
+// See https://github.com/golang/go/issues/5684 +package urlesc + +import ( + "bytes" + "net/url" + "strings" +) + +type encoding int + +const ( + encodePath encoding = 1 + iota + encodeUserPassword + encodeQueryComponent + encodeFragment +) + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return false + } + + switch c { + case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) + return false + + // §2.2 Reserved characters (reserved) + case ':', '/', '?', '#', '[', ']', '@', // gen-delims + '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows sub-delims and : @. + // '/', '[' and ']' can be used to assign meaning to individual path + // segments. This package only manipulates the path as a whole, + // so we allow those as well. That leaves only ? and # to escape. + return c == '?' || c == '#' + + case encodeUserPassword: // §3.2.1 + // The RFC allows : and sub-delims in + // userinfo. The parsing of userinfo treats ':' as special so we must escape + // all the gen-delims. + return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' + + case encodeQueryComponent: // §3.4 + // The RFC allows / and ?. + return c != '/' && c != '?' + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing but # + return c == '#' + } + } + + // Everything else must be escaped. + return true +} + +// QueryEscape escapes the string so it can be safely placed +// inside a URL query. +func QueryEscape(s string) string { + return escape(s, encodeQueryComponent) +} + +func escape(s string, mode encoding) string { + spaceCount, hexCount := 0, 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c, mode) { + if c == ' ' && mode == encodeQueryComponent { + spaceCount++ + } else { + hexCount++ + } + } + } + + if spaceCount == 0 && hexCount == 0 { + return s + } + + t := make([]byte, len(s)+2*hexCount) + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == ' ' && mode == encodeQueryComponent: + t[j] = '+' + j++ + case shouldEscape(c, mode): + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j += 3 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} + +var uiReplacer = strings.NewReplacer( + "%21", "!", + "%27", "'", + "%28", "(", + "%29", ")", + "%2A", "*", +) + +// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. +func unescapeUserinfo(s string) string { + return uiReplacer.Replace(s) +} + +// Escape reassembles the URL into a valid URL string. +// The general form of the result is one of: +// +// scheme:opaque +// scheme://userinfo@host/path?query#fragment +// +// If u.Opaque is non-empty, String uses the first form; +// otherwise it uses the second form. +// +// In the second form, the following rules apply: +// - if u.Scheme is empty, scheme: is omitted. +// - if u.User is nil, userinfo@ is omitted. +// - if u.Host is empty, host/ is omitted. +// - if u.Scheme and u.Host are empty and u.User is nil, +// the entire scheme://userinfo@host/ is omitted. 
+// - if u.Host is non-empty and u.Path begins with a /,
+//   the form host/path does not add its own /.
+// - if u.RawQuery is empty, ?query is omitted.
+// - if u.Fragment is empty, #fragment is omitted.
+func Escape(u *url.URL) string {
+    var buf bytes.Buffer
+    if u.Scheme != "" {
+        buf.WriteString(u.Scheme)
+        buf.WriteByte(':')
+    }
+    if u.Opaque != "" {
+        buf.WriteString(u.Opaque)
+    } else {
+        if u.Scheme != "" || u.Host != "" || u.User != nil {
+            buf.WriteString("//")
+            if ui := u.User; ui != nil {
+                buf.WriteString(unescapeUserinfo(ui.String()))
+                buf.WriteByte('@')
+            }
+            if h := u.Host; h != "" {
+                buf.WriteString(h)
+            }
+        }
+        if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
+            buf.WriteByte('/')
+        }
+        buf.WriteString(escape(u.Path, encodePath))
+    }
+    if u.RawQuery != "" {
+        buf.WriteByte('?')
+        buf.WriteString(u.RawQuery)
+    }
+    if u.Fragment != "" {
+        buf.WriteByte('#')
+        buf.WriteString(escape(u.Fragment, encodeFragment))
+    }
+    return buf.String()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
new file mode 100644
index 000000000000..e29f8eef5efd
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.1
+  - 1.2
+  - 1.3
+  - 1.4
+  - 1.5
+  - 1.6
+  - tip
+
+notifications:
+  email:
+    - bwatas@gmail.com
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
new file mode 100644
index 000000000000..f0f7e3a8add0
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
@@ -0,0 +1,63 @@
+#### Support
+If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know what to work on, here are some features and functions that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that currently use this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the current [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct`, and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they do
+- Public functions must be documented and described in the source file and added to the README.md list of available functions
+- There must be unit tests for any new functions and improvements
+
+## Financial contributions
+
+We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
+Anyone can file an expense.
If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. + + +## Credits + + +### Contributors + +Thank you to all the people who have already contributed to govalidator! + + + +### Backers + +Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 000000000000..2f9a31fadf67 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
new file mode 100644
index 000000000000..40f9a87811b3
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/README.md
@@ -0,0 +1,507 @@
+govalidator
+===========
+[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043)
+[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
+
+A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+    go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package with `gopkg.in`:
+
+    go get gopkg.in/asaskevich/govalidator.v4
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to write out the long `govalidator` name, you can alias the import like this:
+```go
+import (
+    valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields to have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
+ +```go +import "github.com/asaskevich/govalidator" + +func init() { + govalidator.SetFieldsRequiredByDefault(true) +} +``` + +Here's some code to explain it: +```go +// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +type exampleStruct struct { + Name string `` + Email string `valid:"email"` +} + +// this, however, will only fail when Email is empty or an invalid email address: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email"` +} + +// lastly, this will only fail when Email is an invalid email address but not when it's empty: +type exampleStruct2 struct { + Name string `valid:"-"` + Email string `valid:"email,optional"` +} +``` + +#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) +##### Custom validator function signature +A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. +```go +import "github.com/asaskevich/govalidator" + +// old signature +func(i interface{}) bool + +// new signature +func(i interface{}, o interface{}) bool +``` + +##### Adding a custom validator +This was changed to prevent data races when accessing custom validators. +```go +import "github.com/asaskevich/govalidator" + +// before +govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool { + // ... +}) + +// after +govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool { + // ... +})) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func InRange(value, left, right float64) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsISO4217(str string) bool +func IsIn(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str 
string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTime(str string, format string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func Range(str string, params ...string) bool +func RemoveTags(s string) string +func ReplacePattern(str, pattern, replace string) string +func Reverse(s string) string +func RightTrim(str, chars string) string +func RuneLength(str string, params ...string) bool +func SafeFileName(str string) string +func SetFieldsRequiredByDefault(value bool) +func Sign(value float64) float64 +func StringLength(str string, params ...string) bool +func StringMatches(s string, params ...string) bool +func StripLow(str string, keepNewLines bool) string +func ToBoolean(str string) (bool, error) +func ToFloat(str string) (float64, error) +func ToInt(str string) (int64, error) +func ToJSON(obj interface{}) (string, error) +func ToString(obj interface{}) string +func Trim(str, chars string) string +func Truncate(str string, length int, ending string) string +func UnderscoreToCamelCase(s string) string +func ValidateStruct(s interface{}) (bool, error) +func WhiteList(str, chars string) string +type ConditionIterator +type CustomTypeValidator +type Error +func (e Error) Error() string +type Errors +func (es Errors) Error() string +func (es Errors) Errors() []error +type ISO3166Entry +type Iterator +type ParamValidator +type ResultIterator +type UnsupportedTypeError +func (e *UnsupportedTypeError) Error() string +type Validator +``` + +#### Examples +###### IsURL +```go +println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) +``` +###### ToString +```go +type User struct { + FirstName string + LastName string +} + +str := govalidator.ToString(&User{"John", "Juan"}) +println(str) +``` +###### Each, Map, Filter, Count for slices +Each iterates over the slice/array and calls Iterator for every item +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.Iterator = func(value interface{}, index int) { + println(value.(int)) +} +govalidator.Each(data, fn) +``` +```go +data := []interface{}{1, 2, 3, 4, 5} +var fn govalidator.ResultIterator 
= func(value interface{}, index int) interface{} {
+	return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+	return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn)  // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag for any field in your structure. All validators used with a field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
+
+Here is a list of available validators for struct fields (validator - used function):
+```go
+"email":              IsEmail,
+"url":                IsURL,
+"dialstring":         IsDialString,
+"requrl":             IsRequestURL,
+"requri":             IsRequestURI,
+"alpha":              IsAlpha,
+"utfletter":          IsUTFLetter,
+"alphanum":           IsAlphanumeric,
+"utfletternum":       IsUTFLetterNumeric,
+"numeric":            IsNumeric,
+"utfnumeric":         IsUTFNumeric,
+"utfdigit":           IsUTFDigit,
+"hexadecimal":        IsHexadecimal,
+"hexcolor":           IsHexcolor,
+"rgbcolor":           IsRGBcolor,
+"lowercase":          IsLowerCase,
+"uppercase":          IsUpperCase,
+"int":                IsInt,
+"float":              IsFloat,
+"null":               IsNull,
+"uuid":               IsUUID,
+"uuidv3":             IsUUIDv3,
+"uuidv4":             IsUUIDv4,
+"uuidv5":             IsUUIDv5,
+"creditcard":         IsCreditCard,
+"isbn10":             IsISBN10,
+"isbn13":             IsISBN13,
+"json":               IsJSON,
+"multibyte":          IsMultibyte,
+"ascii":              IsASCII,
+"printableascii":     IsPrintableASCII,
+"fullwidth":          IsFullWidth,
+"halfwidth":          IsHalfWidth,
+"variablewidth":      IsVariableWidth,
+"base64":             IsBase64,
+"datauri":            IsDataURI,
+"ip":                 IsIP,
+"port":               IsPort,
+"ipv4":               IsIPv4,
+"ipv6":               IsIPv6,
+"dns":                IsDNSName,
+"host":               IsHost,
+"mac":                IsMAC,
+"latitude":           IsLatitude,
+"longitude":          IsLongitude,
+"ssn":                IsSSN,
+"semver":             IsSemver,
+"rfc3339":            IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2":      IsISO3166Alpha2,
+"ISO3166Alpha3":      IsISO3166Alpha3,
+```
+Validators with parameters:
+
+```go
+"range(min|max)":                  Range,
+"length(min|max)":                 ByteLength,
+"runelength(min|max)":             RuneLength,
+"stringlength(min|max)":           StringLength,
+"matches(pattern)":                StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)":               IsRsaPub,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+	Title    string `valid:"alphanum,required"`
+	Message  string `valid:"duck,ascii"`
+	Message2 string `valid:"animal(dog)"`
+	AuthorIP string `valid:"ipv4"`
+	Date     string `valid:"-"`
+}
+post := &Post{
+	Title:    "MyExamplePost", // alphanum does not allow spaces
+	Message:  "duck",
+	Message2: "dog",
+	AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+	species := params[0]
+	return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+	println("error: " + err.Error())
+}
+println(result)
+```
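+
+When `ValidateStruct` returns an error, it can also be unpacked per field instead of printed wholesale. A short sketch reusing the `post` value from the example above (the message text shown is illustrative):
+```go
+if _, err := govalidator.ValidateStruct(post); err != nil {
+	// ErrorsByField returns a map of field name -> error message.
+	for field, message := range govalidator.ErrorsByField(err) {
+		println(field + ": " + message)
+	}
+}
+```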
+###### WhiteList
+```go
+// Remove all characters from the string except those between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+	ID              CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+	Email           string          `valid:"email"`
+	CustomMinLength int             `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // you can type switch on the context interface being validated
+	case StructWithCustomByteArray:
+		// you can check and validate against some other field in the context,
+		// return early or not validate against the context at all – your choice
+	case SomeOtherType:
+		// ...
+	default:
+		// expecting some other type? panic here or continue
+	}
+
+	switch v := i.(type) { // type switch on the struct field being validated
+	case CustomByteArray:
+		for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+			if e != 0 {
+				return true
+			}
+		}
+	}
+	return false
+}))
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+	case StructWithCustomByteArray:
+		return len(v.ID) >= v.CustomMinLength
+	}
+	return false
+}))
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+	Id        int64  `json:"id"`
+	FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
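+
+A short sketch of what that custom message yields at validation time (validating a zero-value `Ticket` here purely for illustration):
+```go
+if _, err := govalidator.ValidateStruct(Ticket{}); err != nil {
+	println(err.Error()) // "First name is blank" instead of the default message
+}
+```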
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know where to start, here are some features and tasks that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that are currently using this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc.
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they do
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 000000000000..5bace2654d3b
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,58 @@
+package govalidator
+
+// Iterator is the function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is the function that accepts an element of a slice/array and its index and returns any result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is the function that accepts an element of a slice/array and its index and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// Each iterates over the slice and applies Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+	for index, data := range array {
+		iterator(data, index)
+	}
+}
+
+// Map iterates over the slice and applies ResultIterator to every item. Returns the new slice as a result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+	var result = make([]interface{}, len(array))
+	for index, data := range array {
+		result[index] = iterator(data, index)
+	}
+	return result
+}
+
+// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+	for index, data := range array {
+		if iterator(data, index) {
+			return data
+		}
+	}
+	return nil
+}
+
+// Filter iterates over the slice and applies ConditionIterator to every item. Returns the new slice.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+	var result = make([]interface{}, 0)
+	for index, data := range array {
+		if iterator(data, index) {
+			result = append(result, data)
+		}
+	}
+	return result
+}
+
+// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+	count := 0
+	for index, data := range array {
+		if iterator(data, index) {
+			count = count + 1
+		}
+	}
+	return count
+}
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 000000000000..cf1e5d569ba0
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,64 @@
+package govalidator
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+	res := fmt.Sprintf("%v", obj)
+	return string(res)
+}
+
+// ToJSON converts the input to a valid JSON string.
+func ToJSON(obj interface{}) (string, error) {
+	res, err := json.Marshal(obj)
+	if err != nil {
+		res = []byte("")
+	}
+	return string(res), err
+}
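+
+// Usage sketch for the converters in this file (illustrative; the literal
+// inputs and results here are assumptions, not part of the original source):
+//
+//	s, _ := ToJSON(map[string]int{"a": 1}) // s == `{"a":1}`
+//	f, _ := ToFloat("3.14")                // f == 3.14
+//	n, _ := ToInt(uint8(7))                // n == int64(7)
+//	b, _ := ToBoolean("true")              // b == true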
+// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
+func ToFloat(str string) (float64, error) {
+	res, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		res = 0.0
+	}
+	return res, err
+}
+
+// ToInt converts the input string or any int type to an int64, or 0 if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+	val := reflect.ValueOf(value)
+
+	switch value.(type) {
+	case int, int8, int16, int32, int64:
+		res = val.Int()
+	case uint, uint8, uint16, uint32, uint64:
+		res = int64(val.Uint())
+	case string:
+		if IsInt(val.String()) {
+			res, err = strconv.ParseInt(val.String(), 0, 64)
+			if err != nil {
+				res = 0
+			}
+		} else {
+			err = fmt.Errorf("ToInt: invalid numeric value %v", value)
+			res = 0
+		}
+	default:
+		err = fmt.Errorf("ToInt: unsupported input type %T", value)
+		res = 0
+	}
+
+	return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+	return strconv.ParseBool(str)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 000000000000..655b750cb8f6
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,43 @@
+package govalidator
+
+import "strings"
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+	return es
+}
+
+func (es Errors) Error() string {
+	var errs []string
+	for _, e := range es {
+		errs = append(errs, e.Error())
+	}
+	return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+	Name                     string
+	Err                      error
+	CustomErrorMessageExists bool
+
+	// Validator indicates the name of the validator that failed
+	Validator string
+	Path      []string
+}
+
+func (e Error) Error() string {
+	if e.CustomErrorMessageExists {
+		return e.Err.Error()
+	}
+
+	errName := e.Name
+	if len(e.Path) > 0 {
+		errName = strings.Join(append(e.Path, e.Name), ".")
+	}
+
+	return errName + ": " + e.Err.Error()
+}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 000000000000..7e6c652e140c
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,97 @@
+package govalidator
+
+import (
+	"math"
+	"reflect"
+)
+
+// Abs returns the absolute value of a number
+func Abs(value float64) float64 {
+	return math.Abs(value)
+}
+
+// Sign returns the signum of a number: 1 if value > 0, -1 if value < 0, 0 otherwise
+func Sign(value float64) float64 {
+	if value > 0 {
+		return 1
+	} else if value < 0 {
+		return -1
+	} else {
+		return 0
+	}
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+	return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+	return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+	return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+	return value <= 0
+}
+
+// InRangeInt returns true if value lies between left and right border
+func InRangeInt(value, left, right interface{}) bool {
+	value64, _ := ToInt(value)
+	left64, _ := ToInt(left)
+	right64, _ := ToInt(right)
+	if left64 > right64 {
+		left64, right64 = right64, left64
+	}
+	return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between left and right border
+func InRangeFloat32(value, left, right float32) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between left and right border
+func InRangeFloat64(value, left, right float64) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRange returns true if value lies between left and right border; it is a generic version handling int, float32 or float64, and all values must be of the same type
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+
+	reflectValue := reflect.TypeOf(value).Kind()
+	reflectLeft := reflect.TypeOf(left).Kind()
+	reflectRight := reflect.TypeOf(right).Kind()
+
+	if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+		return InRangeInt(value.(int), left.(int), right.(int))
+	} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+		return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+	} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+		return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+	} else {
+		return false
+	}
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+	return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+	return IsWhole(value) && IsPositive(value)
+}
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
new file mode 100644
index 000000000000..61a05d438e18
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/patterns.go
@@ -0,0 +1,101 @@
+package govalidator
+
+import "regexp"
+
+// Basic regular expressions for validating strings
+const (
+	Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+	CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$"
+	ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
+	ISBN13 string = "^(?:[0-9]{13})$"
+	UUID3 string =
"^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS 
type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = regexp.MustCompile(UnixPath) + rxSemver = regexp.MustCompile(Semver) + rxHasLowerCase = regexp.MustCompile(hasLowerCase) + rxHasUpperCase = regexp.MustCompile(hasUpperCase) + rxHasWhitespace = regexp.MustCompile(hasWhitespace) + rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) +) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go new file mode 100644 index 000000000000..4f7e9274ade0 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/types.go @@ -0,0 +1,636 @@ +package govalidator + +import ( + "reflect" + "regexp" + "sort" + "sync" +) + +// Validator is a wrapper for a validator function that returns bool and accepts string. +type Validator func(str string) bool + +// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. +// The second parameter should be the context (in the case of validating a struct: the whole object being validated). +type CustomTypeValidator func(i interface{}, o interface{}) bool + +// ParamValidator is a wrapper for validator functions that accepts additional parameters. +type ParamValidator func(str string, params ...string) bool +type tagOptionsMap map[string]tagOption + +func (t tagOptionsMap) orderedKeys() []string { + var keys []string + for k := range t { + keys = append(keys, k) + } + + sort.Slice(keys, func(a, b int) bool { + return t[keys[a]].order < t[keys[b]].order + }) + + return keys +} + +type tagOption struct { + name string + customErrorMessage string + order int +} + +// UnsupportedTypeError is a wrapper for reflect.Type +type UnsupportedTypeError struct { + Type reflect.Type +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. 
+type stringValues []reflect.Value
+
+// ParamTagMap is a map of functions that accept additional parameters
+var ParamTagMap = map[string]ParamValidator{
+	"length":       ByteLength,
+	"range":        Range,
+	"runelength":   RuneLength,
+	"stringlength": StringLength,
+	"matches":      StringMatches,
+	"in":           isInRaw,
+	"rsapub":       IsRsaPub,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+	"range":        regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+	"length":       regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+	"runelength":   regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+	"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+	"in":           regexp.MustCompile(`^in\((.*)\)`),
+	"matches":      regexp.MustCompile(`^matches\((.+)\)$`),
+	"rsapub":       regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+	validators map[string]CustomTypeValidator
+
+	sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+	tm.RLock()
+	defer tm.RUnlock()
+	v, ok := tm.validators[name]
+	return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+	tm.Lock()
+	defer tm.Unlock()
+	tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for the ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
+var TagMap = map[string]Validator{
+	"email":              IsEmail,
+	"url":                IsURL,
+	"dialstring":         IsDialString,
+	"requrl":             IsRequestURL,
+	"requri":             IsRequestURI,
+	"alpha":              IsAlpha,
+	"utfletter":          IsUTFLetter,
+	"alphanum":           IsAlphanumeric,
+	"utfletternum":       IsUTFLetterNumeric,
+	"numeric":            IsNumeric,
+	"utfnumeric":         IsUTFNumeric,
+	"utfdigit":           IsUTFDigit,
+	"hexadecimal":        IsHexadecimal,
+	"hexcolor":           IsHexcolor,
+	"rgbcolor":           IsRGBcolor,
+	"lowercase":          IsLowerCase,
+	"uppercase":          IsUpperCase,
+	"int":                IsInt,
+	"float":              IsFloat,
+	"null":               IsNull,
+	"uuid":               IsUUID,
+	"uuidv3":             IsUUIDv3,
+	"uuidv4":             IsUUIDv4,
+	"uuidv5":             IsUUIDv5,
+	"creditcard":         IsCreditCard,
+	"isbn10":             IsISBN10,
+	"isbn13":             IsISBN13,
+	"json":               IsJSON,
+	"multibyte":          IsMultibyte,
+	"ascii":              IsASCII,
+	"printableascii":     IsPrintableASCII,
+	"fullwidth":          IsFullWidth,
+	"halfwidth":          IsHalfWidth,
+	"variablewidth":      IsVariableWidth,
+	"base64":             IsBase64,
+	"datauri":            IsDataURI,
+	"ip":                 IsIP,
+	"port":               IsPort,
+	"ipv4":               IsIPv4,
+	"ipv6":               IsIPv6,
+	"dns":                IsDNSName,
+	"host":               IsHost,
+	"mac":                IsMAC,
+	"latitude":           IsLatitude,
+	"longitude":          IsLongitude,
+	"ssn":                IsSSN,
+	"semver":             IsSemver,
+	"rfc3339":            IsRFC3339,
+	"rfc3339WithoutZone": IsRFC3339WithoutZone,
+	"ISO3166Alpha2":      IsISO3166Alpha2,
+	"ISO3166Alpha3":      IsISO3166Alpha3,
+	"ISO4217":            IsISO4217,
+}
+
+// ISO3166Entry stores country codes
+type ISO3166Entry struct {
+	EnglishShortName string
+	FrenchShortName  string
+	Alpha2Code       string
+	Alpha3Code       string
+	Numeric          string
+}
+
+// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
+var ISO3166List = []ISO3166Entry{
+	{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
+	{"Albania", "Albanie (l')", "AL", "ALB", "008"},
+	{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
+	{"Algeria", "Algérie
(l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", 
"ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, + {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + {"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", 
"LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", 
"RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, + {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands 
(U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UZS", + "VEF", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", 
English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: 
"ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", 
Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git 
a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go
new file mode 100644
index 000000000000..a0b706a743ce
--- /dev/null
+++ b/vendor/github.com/asaskevich/govalidator/utils.go
@@ -0,0 +1,270 @@
+package govalidator
+
+import (
+	"errors"
+	"fmt"
+	"html"
+	"math"
+	"path"
+	"regexp"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Contains checks if the string contains the substring.
+func Contains(str, substring string) bool {
+	return strings.Contains(str, substring)
+}
+
+// Matches checks if the string matches the pattern (the pattern is a regular expression).
+// In case of error it returns false.
+func Matches(str, pattern string) bool {
+	match, _ := regexp.MatchString(pattern, str)
+	return match
+}
+
+// LeftTrim trims characters from the left side of the input.
+// If the second argument is empty, leading spaces are removed.
+func LeftTrim(str, chars string) string {
+	if chars == "" {
+		return strings.TrimLeftFunc(str, unicode.IsSpace)
+	}
+	r, _ := regexp.Compile("^[" + chars + "]+")
+	return r.ReplaceAllString(str, "")
+}
+
+// RightTrim trims characters from the right side of the input.
+// If the second argument is empty, trailing spaces are removed.
+func RightTrim(str, chars string) string {
+	if chars == "" {
+		return strings.TrimRightFunc(str, unicode.IsSpace)
+	}
+	r, _ := regexp.Compile("[" + chars + "]+$")
+	return r.ReplaceAllString(str, "")
+}
+
+// Trim trims characters from both sides of the input.
+// If the second argument is empty, surrounding spaces are removed.
+func Trim(str, chars string) string {
+	return LeftTrim(RightTrim(str, chars), chars)
+}
+
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+	pattern := "[^" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+	pattern := "[" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32 and the character 127, mostly control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+	chars := ""
+	if keepNewLines {
+		chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+	} else {
+		chars = "\x00-\x1F\x7F"
+	}
+	return BlackList(str, chars)
+}
+
+// ReplacePattern replaces a regular expression pattern in the string.
+func ReplacePattern(str, pattern, replace string) string {
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+	if len(segment) == 0 {
+		return inrune
+	}
+	if len(inrune) != 0 {
+		inrune = append(inrune, '_')
+	}
+	inrune = append(inrune, segment...)
+	return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+	return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func +func CamelCaseToUnderscore(str string) string { + var output []rune + var segment []rune + for _, r := range str { + + // not treat number as separate segment + if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { + output = addSegment(output, segment) + segment = nil + } + segment = append(segment, unicode.ToLower(r)) + } + output = addSegment(output, segment) + return string(output) +} + +// Reverse return reversed string +func Reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} + +// GetLines split string by "\n" and return array of lines +func GetLines(s string) []string { + return strings.Split(s, "\n") +} + +// GetLine return specified line of multiline string +func GetLine(s string, index int) (string, error) { + lines := GetLines(s) + if index < 0 || index >= len(lines) { + return "", errors.New("line index out of bounds") + } + return lines[index], nil +} + +// RemoveTags remove all tags from HTML string +func RemoveTags(s string) string { + return ReplacePattern(s, "<[^>]*>", "") +} + +// SafeFileName return safe string that can be used in file names +func SafeFileName(str string) string { + name := strings.ToLower(str) + name = path.Clean(path.Base(name)) + name = strings.Trim(name, " ") + separators, err := regexp.Compile(`[ &_=+:]`) + if err == nil { + name = separators.ReplaceAllString(name, "-") + } + legal, err := regexp.Compile(`[^[:alnum:]-.]`) + if err == nil { + name = legal.ReplaceAllString(name, "") + } + for strings.Contains(name, "--") { + name = strings.Replace(name, "--", "-", -1) + } + return name +} + +// NormalizeEmail canonicalize an email address. +// The local part of the email address is lowercased for all domains; the hostname is always lowercased and +// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). +// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and +// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are +// normalized to @gmail.com. +func NormalizeEmail(str string) (string, error) { + if !IsEmail(str) { + return "", fmt.Errorf("%s is not an email", str) + } + parts := strings.Split(str, "@") + parts[0] = strings.ToLower(parts[0]) + parts[1] = strings.ToLower(parts[1]) + if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { + parts[1] = "gmail.com" + parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] + } + return strings.Join(parts, "@"), nil +} + +// Truncate a string to the closest length without breaking words. 
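+//
+// Illustrative only (not part of the upstream source; the input and
+// ending below are made up):
+//	Truncate("hello there world", 10, "...") // "hello there..."
+// The cut falls on the word boundary closest to the requested length,
+// and the ending is appended afterwards.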
+func Truncate(str string, length int, ending string) string { + var aftstr, befstr string + if len(str) > length { + words := strings.Fields(str) + before, present := 0, 0 + for i := range words { + befstr = aftstr + before = present + aftstr = aftstr + words[i] + " " + present = len(aftstr) + if present > length && i != 0 { + if (length - before) < (present - length) { + return Trim(befstr, " /\\.,\"'#!?&@+-") + ending + } + return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending + } + } + } + + return str +} + +// PadLeft pad left side of string if size of string is less then indicated pad length +func PadLeft(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, false) +} + +// PadRight pad right side of string if size of string is less then indicated pad length +func PadRight(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, false, true) +} + +// PadBoth pad sides of string if size of string is less then indicated pad length +func PadBoth(str string, padStr string, padLen int) string { + return buildPadStr(str, padStr, padLen, true, true) +} + +// PadString either left, right or both sides, not the padding string can be unicode and more then one +// character +func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { + + // When padded length is less then the current string size + if padLen < utf8.RuneCountInString(str) { + return str + } + + padLen -= utf8.RuneCountInString(str) + + targetLen := padLen + + targetLenLeft := targetLen + targetLenRight := targetLen + if padLeft && padRight { + targetLenLeft = padLen / 2 + targetLenRight = padLen - targetLenLeft + } + + strToRepeatLen := utf8.RuneCountInString(padStr) + + repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) + repeatedString := strings.Repeat(padStr, repeatTimes) + + leftSide := "" + if padLeft { + leftSide = repeatedString[0:targetLenLeft] + } + + rightSide := "" + if padRight { + rightSide = repeatedString[0:targetLenRight] + } + + return leftSide + str + rightSide +} + +// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object +func TruncatingErrorf(str string, args ...interface{}) error { + n := strings.Count(str, "%s") + return fmt.Errorf(str, args[:n]...) +} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go new file mode 100644 index 000000000000..b18bbcb4c99f --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -0,0 +1,1278 @@ +// Package govalidator is package of validators and sanitizers for strings, structs and collections. +package govalidator + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +var ( + fieldsRequiredByDefault bool + nilPtrAllowedByRequired = false + notNumberRegexp = regexp.MustCompile("[^0-9]+") + whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) + paramsRegexp = regexp.MustCompile(`\(.*\)$`) +) + +const maxURLRuneCount = 2083 +const minURLRuneCount = 3 +const RF3339WithoutZone = "2006-01-02T15:04:05" + +// SetFieldsRequiredByDefault causes validation to fail when struct fields +// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). 
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): +// type exampleStruct struct { +// Name string `` +// Email string `valid:"email"` +// This, however, will only fail when Email is empty or an invalid email address: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email"` +// Lastly, this will only fail when Email is an invalid email address but not when it's empty: +// type exampleStruct2 struct { +// Name string `valid:"-"` +// Email string `valid:"email,optional"` +func SetFieldsRequiredByDefault(value bool) { + fieldsRequiredByDefault = value +} + +// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. +// The validation will still reject ptr fields in their zero value state. Example with this enabled: +// type exampleStruct struct { +// Name *string `valid:"required"` +// With `Name` set to "", this will be considered invalid input and will cause a validation error. +// With `Name` set to nil, this will be considered valid by validation. +// By default this is disabled. +func SetNilPtrAllowedByRequired(value bool) { + nilPtrAllowedByRequired = value +} + +// IsEmail check if the string is an email. +func IsEmail(str string) bool { + // TODO uppercase letters are not supported + return rxEmail.MatchString(str) +} + +// IsExistingEmail check if the string is an email of existing domain +func IsExistingEmail(email string) bool { + + if len(email) < 6 || len(email) > 254 { + return false + } + at := strings.LastIndex(email, "@") + if at <= 0 || at > len(email)-3 { + return false + } + user := email[:at] + host := email[at+1:] + if len(user) > 64 { + return false + } + if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { + return false + } + switch host { + case "localhost", "example.com": + return true + } + if _, err := net.LookupMX(host); err != nil { + if _, err := net.LookupIP(host); err != nil { + return false + } + } + + return true +} + +// IsURL check if the string is an URL. +func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} + +// IsRequestURL check if the string rawurl, assuming +// it was received in an HTTP request, is a valid +// URL confirm to RFC 3986 +func IsRequestURL(rawurl string) bool { + url, err := url.ParseRequestURI(rawurl) + if err != nil { + return false //Couldn't even parse the rawurl + } + if len(url.Scheme) == 0 { + return false //No Scheme found + } + return true +} + +// IsRequestURI check if the string rawurl, assuming +// it was received in an HTTP request, is an +// absolute URI or an absolute path. +func IsRequestURI(rawurl string) bool { + _, err := url.ParseRequestURI(rawurl) + return err == nil +} + +// IsAlpha check if the string contains only letters (a-zA-Z). Empty string is valid. 
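+//
+// Illustrative only (not part of the upstream source):
+//	IsAlpha("abc") // true
+//	IsAlpha("ab1") // false
+//	IsAlpha("")    // true (empty string is valid)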
+func IsAlpha(str string) bool { + if IsNull(str) { + return true + } + return rxAlpha.MatchString(str) +} + +//IsUTFLetter check if the string contains only unicode letter characters. +//Similar to IsAlpha but for all languages. Empty string is valid. +func IsUTFLetter(str string) bool { + if IsNull(str) { + return true + } + + for _, c := range str { + if !unicode.IsLetter(c) { + return false + } + } + return true + +} + +// IsAlphanumeric check if the string contains only letters and numbers. Empty string is valid. +func IsAlphanumeric(str string) bool { + if IsNull(str) { + return true + } + return rxAlphanumeric.MatchString(str) +} + +// IsUTFLetterNumeric check if the string contains only unicode letters and numbers. Empty string is valid. +func IsUTFLetterNumeric(str string) bool { + if IsNull(str) { + return true + } + for _, c := range str { + if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok + return false + } + } + return true + +} + +// IsNumeric check if the string contains only numbers. Empty string is valid. +func IsNumeric(str string) bool { + if IsNull(str) { + return true + } + return rxNumeric.MatchString(str) +} + +// IsUTFNumeric check if the string contains only unicode numbers of any kind. +// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. +func IsUTFNumeric(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsNumber(c) { //numbers && minus sign are ok + return false + } + } + return true + +} + +// IsUTFDigit check if the string contains only unicode radix-10 decimal digits. Empty string is valid. +func IsUTFDigit(str string) bool { + if IsNull(str) { + return true + } + if strings.IndexAny(str, "+-") > 0 { + return false + } + if len(str) > 1 { + str = strings.TrimPrefix(str, "-") + str = strings.TrimPrefix(str, "+") + } + for _, c := range str { + if !unicode.IsDigit(c) { //digits && minus sign are ok + return false + } + } + return true + +} + +// IsHexadecimal check if the string is a hexadecimal number. +func IsHexadecimal(str string) bool { + return rxHexadecimal.MatchString(str) +} + +// IsHexcolor check if the string is a hexadecimal color. +func IsHexcolor(str string) bool { + return rxHexcolor.MatchString(str) +} + +// IsRGBcolor check if the string is a valid RGB color in form rgb(RRR, GGG, BBB). +func IsRGBcolor(str string) bool { + return rxRGBcolor.MatchString(str) +} + +// IsLowerCase check if the string is lowercase. Empty string is valid. +func IsLowerCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToLower(str) +} + +// IsUpperCase check if the string is uppercase. Empty string is valid. +func IsUpperCase(str string) bool { + if IsNull(str) { + return true + } + return str == strings.ToUpper(str) +} + +// HasLowerCase check if the string contains at least 1 lowercase. Empty string is valid. +func HasLowerCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasLowerCase.MatchString(str) +} + +// HasUpperCase check if the string contians as least 1 uppercase. Empty string is valid. +func HasUpperCase(str string) bool { + if IsNull(str) { + return true + } + return rxHasUpperCase.MatchString(str) +} + +// IsInt check if the string is an integer. Empty string is valid. 
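+//
+// Illustrative only (not part of the upstream source):
+//	IsInt("123") // true
+//	IsInt("-9")  // true
+//	IsInt("1.5") // false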
+func IsInt(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+	return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that's divisible by another.
+// If the second argument is not a valid integer, or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer, or is zero, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+	f, _ := ToFloat(str)
+	p := int64(f)
+	q, _ := ToInt(num)
+	if q == 0 {
+		return false
+	}
+	return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null (empty).
+func IsNull(str string) bool {
+	return len(str) == 0
+}
+
+// HasWhitespaceOnly checks if the string contains only whitespace.
+func HasWhitespaceOnly(str string) bool {
+	return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace.
+func HasWhitespace(str string) bool {
+	return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+	return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+	return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+	return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+	return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+	return rxUUID.MatchString(str)
+}
+
+// IsCreditCard checks if the string is a credit card number (Luhn checksum over the digits).
+func IsCreditCard(str string) bool {
+	sanitized := notNumberRegexp.ReplaceAllString(str, "")
+	if !rxCreditCard.MatchString(sanitized) {
+		return false
+	}
+	var sum int64
+	var digit string
+	var tmpNum int64
+	var shouldDouble bool
+	for i := len(sanitized) - 1; i >= 0; i-- {
+		digit = sanitized[i:(i + 1)]
+		tmpNum, _ = ToInt(digit)
+		if shouldDouble {
+			tmpNum *= 2
+			if tmpNum >= 10 {
+				sum += ((tmpNum % 10) + 1)
+			} else {
+				sum += tmpNum
+			}
+		} else {
+			sum += tmpNum
+		}
+		shouldDouble = !shouldDouble
+	}
+
+	return sum%10 == 0
+}
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+	return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+	return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, both variants will be checked.
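+//
+// Illustrative only (not part of the upstream source; the sample codes
+// below are assumed to be valid ISBNs):
+//	IsISBN("3-8362-2119-5", 10)     // true
+//	IsISBN("978-3-8362-2119-1", 13) // true
+//	IsISBN("3-8362-2119-5", 0)      // true (version 0 checks both variants)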
+func IsISBN(str string, version int) bool { + sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") + var checksum int32 + var i int32 + if version == 10 { + if !rxISBN10.MatchString(sanitized) { + return false + } + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(sanitized[i]-'0') + } + if sanitized[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(sanitized[9]-'0') + } + if checksum%11 == 0 { + return true + } + return false + } else if version == 13 { + if !rxISBN13.MatchString(sanitized) { + return false + } + factor := []int32{1, 3} + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(sanitized[i]-'0') + } + return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 + } + return IsISBN(str, 10) || IsISBN(str, 13) +} + +// IsJSON check if the string is valid JSON (note: uses json.Unmarshal). +func IsJSON(str string) bool { + var js json.RawMessage + return json.Unmarshal([]byte(str), &js) == nil +} + +// IsMultibyte check if the string contains one or more multibyte chars. Empty string is valid. +func IsMultibyte(str string) bool { + if IsNull(str) { + return true + } + return rxMultibyte.MatchString(str) +} + +// IsASCII check if the string contains ASCII chars only. Empty string is valid. +func IsASCII(str string) bool { + if IsNull(str) { + return true + } + return rxASCII.MatchString(str) +} + +// IsPrintableASCII check if the string contains printable ASCII chars only. Empty string is valid. +func IsPrintableASCII(str string) bool { + if IsNull(str) { + return true + } + return rxPrintableASCII.MatchString(str) +} + +// IsFullWidth check if the string contains any full-width chars. Empty string is valid. +func IsFullWidth(str string) bool { + if IsNull(str) { + return true + } + return rxFullWidth.MatchString(str) +} + +// IsHalfWidth check if the string contains any half-width chars. Empty string is valid. +func IsHalfWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) +} + +// IsVariableWidth check if the string contains a mixture of full and half-width chars. Empty string is valid. +func IsVariableWidth(str string) bool { + if IsNull(str) { + return true + } + return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) +} + +// IsBase64 check if a string is base64 encoded. +func IsBase64(str string) bool { + return rxBase64.MatchString(str) +} + +// IsFilePath check is a string is Win or Unix file path and returns it's type. 
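+//
+// Illustrative only (not part of the upstream source; the paths are
+// assumptions about what the Win/Unix patterns accept):
+//	ok, kind := IsFilePath(`c:\path\file.txt`) // true, Win
+//	ok, kind = IsFilePath("/usr/local/bin")    // true, Unix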
+func IsFilePath(str string) (bool, int) { + if rxWinPath.MatchString(str) { + //check windows path limit see: + // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath + if len(str[3:]) > 32767 { + return false, Win + } + return true, Win + } else if rxUnixPath.MatchString(str) { + return true, Unix + } + return false, Unknown +} + +// IsDataURI checks if a string is base64 encoded data URI such as an image +func IsDataURI(str string) bool { + dataURI := strings.Split(str, ",") + if !rxDataURI.MatchString(dataURI[0]) { + return false + } + return IsBase64(dataURI[1]) +} + +// IsISO3166Alpha2 checks if a string is valid two-letter country code +func IsISO3166Alpha2(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO3166Alpha3 checks if a string is valid three-letter country code +func IsISO3166Alpha3(str string) bool { + for _, entry := range ISO3166List { + if str == entry.Alpha3Code { + return true + } + } + return false +} + +// IsISO693Alpha2 checks if a string is valid two-letter language code +func IsISO693Alpha2(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha2Code { + return true + } + } + return false +} + +// IsISO693Alpha3b checks if a string is valid three-letter language code +func IsISO693Alpha3b(str string) bool { + for _, entry := range ISO693List { + if str == entry.Alpha3bCode { + return true + } + } + return false +} + +// IsDNSName will validate the given string as a DNS name +func IsDNSName(str string) bool { + if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { + // constraints already violated + return false + } + return !IsIP(str) && rxDNSName.MatchString(str) +} + +// IsHash checks if a string is a hash of type algorithm. +// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] +func IsHash(str string, algorithm string) bool { + len := "0" + algo := strings.ToLower(algorithm) + + if algo == "crc32" || algo == "crc32b" { + len = "8" + } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { + len = "32" + } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { + len = "40" + } else if algo == "tiger192" { + len = "48" + } else if algo == "sha256" { + len = "64" + } else if algo == "sha384" { + len = "96" + } else if algo == "sha512" { + len = "128" + } else { + return false + } + + return Matches(str, "^[a-f0-9]{"+len+"}$") +} + +// IsDialString validates the given string for usage with the various Dial() functions +func IsDialString(str string) bool { + + if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { + return true + } + + return false +} + +// IsIP checks if a string is either IP version 4 or 6. +func IsIP(str string) bool { + return net.ParseIP(str) != nil +} + +// IsPort checks if a string represents a valid port +func IsPort(str string) bool { + if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { + return true + } + return false +} + +// IsIPv4 check if the string is an IP version 4. +func IsIPv4(str string) bool { + ip := net.ParseIP(str) + return ip != nil && strings.Contains(str, ".") +} + +// IsIPv6 check if the string is an IP version 6. 
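+//
+// Illustrative only (not part of the upstream source):
+//	IsIPv6("2001:db8::1") // true
+//	IsIPv6("192.0.2.1")   // false (parses, but as IPv4)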
+func IsIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6).
+func IsCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name.
+func IsHost(str string) bool {
+	return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+	return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+	return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+	return rxLongitude.MatchString(str)
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided length.
+func IsRsaPublicKey(str string, keylen int) bool {
+	bb := bytes.NewBufferString(str)
+	pemBytes, err := ioutil.ReadAll(bb)
+	if err != nil {
+		return false
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block != nil && block.Type != "PUBLIC KEY" {
+		return false
+	}
+	var der []byte
+
+	if block != nil {
+		der = block.Bytes
+	} else {
+		der, err = base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return false
+		}
+	}
+
+	key, err := x509.ParsePKIXPublicKey(der)
+	if err != nil {
+		return false
+	}
+	pubkey, ok := key.(*rsa.PublicKey)
+	if !ok {
+		return false
+	}
+	bitlen := len(pubkey.N.Bytes()) * 8
+	return bitlen == int(keylen)
+}
+
+func toJSONName(tag string) string {
+	if tag == "" {
+		return ""
+	}
+
+	// JSON name always comes first. If there's no options then split[0] is
+	// JSON name, if JSON name is not set, then split[0] is an empty string.
+	split := strings.SplitN(tag, ",", 2)
+
+	name := split[0]
+
+	// However it is possible that the field is skipped when
+	// (de-)serializing from/to JSON, in which case assume that there is no
+	// tag name to use
+	if name == "-" {
+		return ""
+	}
+	return name
+}
+
+func PrependPathToErrors(err error, path string) error {
+	switch err2 := err.(type) {
+	case Error:
+		err2.Path = append([]string{path}, err2.Path...)
+		return err2
+	case Errors:
+		errors := err2.Errors()
+		for i, err3 := range errors {
+			errors[i] = PrependPathToErrors(err3, path)
+		}
+		return err2
+	}
+	fmt.Println(err)
+	return err
+}
+
+// ValidateStruct validates struct fields by their `valid` tags.
+// The result will be equal to `false` if there are any errors.
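+//
+// A minimal sketch, illustrative only (the struct and its values are
+// assumptions, not part of the upstream source):
+//	type page struct {
+//		Title string `valid:"alphanum,required"`
+//		URL   string `valid:"url"`
+//	}
+//	ok, err := ValidateStruct(page{Title: "home1", URL: "https://example.com"})
+//	// ok == true, err == nil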
+func ValidateStruct(s interface{}) (bool, error) { + if s == nil { + return true, nil + } + result := true + var err error + val := reflect.ValueOf(s) + if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + val = val.Elem() + } + // we only accept structs + if val.Kind() != reflect.Struct { + return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) + } + var errs Errors + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + if typeField.PkgPath != "" { + continue // Private field + } + structResult := true + if valueField.Kind() == reflect.Interface { + valueField = valueField.Elem() + } + if (valueField.Kind() == reflect.Struct || + (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && + typeField.Tag.Get(tagName) != "-" { + var err error + structResult, err = ValidateStruct(valueField.Interface()) + if err != nil { + err = PrependPathToErrors(err, typeField.Name) + errs = append(errs, err) + } + } + resultField, err2 := typeCheck(valueField, typeField, val, nil) + if err2 != nil { + + // Replace structure name with JSON name if there is a tag on the variable + jsonTag := toJSONName(typeField.Tag.Get("json")) + if jsonTag != "" { + switch jsonError := err2.(type) { + case Error: + jsonError.Name = jsonTag + err2 = jsonError + case Errors: + for i2, err3 := range jsonError { + switch customErr := err3.(type) { + case Error: + customErr.Name = jsonTag + jsonError[i2] = customErr + } + } + + err2 = jsonError + } + } + + errs = append(errs, err2) + } + result = result && resultField && structResult + } + if len(errs) > 0 { + err = errs + } + return result, err +} + +// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} +func parseTagIntoMap(tag string) tagOptionsMap { + optionsMap := make(tagOptionsMap) + options := strings.Split(tag, ",") + + for i, option := range options { + option = strings.TrimSpace(option) + + validationOptions := strings.Split(option, "~") + if !isValidTag(validationOptions[0]) { + continue + } + if len(validationOptions) == 2 { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} + } else { + optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} + } + } + return optionsMap +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// IsSSN will validate the given string as a U.S. 
Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver check if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsTime check if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsRFC3339 check if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, RF3339WithoutZone) +} + +// IsISO4217 check if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength check string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength check string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub check whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. +func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength check string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// Range check string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +func isInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) 
+ } + + return false +} + +// IsIn check if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // Check if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, check only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option check the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // Check for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+ err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) + return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} + } + } + } + return true, nil + case reflect.Map: + if v.Type().Key().Kind() != reflect.String { + return false, &UnsupportedTypeError{v.Type()} + } + var sv stringValues + sv = v.MapKeys() + sort.Sort(sv) + result := true + for i, k := range sv { + var resultItem bool + var err error + if v.MapIndex(k).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.MapIndex(k), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Slice, reflect.Array: + result := true + for i := 0; i < v.Len(); i++ { + var resultItem bool + var err error + if v.Index(i).Kind() != reflect.Struct { + resultItem, err = typeCheck(v.Index(i), t, o, options) + if err != nil { + return false, err + } + } else { + resultItem, err = ValidateStruct(v.Index(i).Interface()) + if err != nil { + err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) + return false, err + } + } + result = result && resultItem + } + return result, nil + case reflect.Interface: + // If the value is an interface then encode its element + if v.IsNil() { + return true, nil + } + return ValidateStruct(v.Interface()) + case reflect.Ptr: + // If the value is a pointer then check its element + if v.IsNil() { + return true, nil + } + return typeCheck(v.Elem(), t, o, options) + case reflect.Struct: + return ValidateStruct(v.Interface()) + default: + return false, &UnsupportedTypeError{v.Type()} + } +} + +func stripParams(validatorString string) string { + return paramsRegexp.ReplaceAllString(validatorString, "") +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.String, reflect.Array: + return v.Len() == 0 + case reflect.Map, reflect.Slice: + return v.Len() == 0 || v.IsNil() + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} + +// ErrorByField returns error for specified field of the struct +// validated by ValidateStruct or empty string if there are no errors +// or this field doesn't exists or doesn't have any errors. +func ErrorByField(e error, field string) string { + if e == nil { + return "" + } + return ErrorsByField(e)[field] +} + +// ErrorsByField returns map of errors of the struct validated +// by ValidateStruct or empty map if there are no errors. 
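+//
+// Illustrative only, continuing the hypothetical `page` struct from the
+// ValidateStruct example above:
+//	_, err := ValidateStruct(page{Title: "!!", URL: "not a url"})
+//	m := ErrorsByField(err)
+//	// m["Title"] and m["URL"] each describe why that field failed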
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e.(type) { + case Error: + m[e.(Error).Name] = e.(Error).Err.Error() + case Errors: + for _, item := range e.(Errors).Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 000000000000..cac7a5fcf063 --- /dev/null +++ b/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race ./... diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore new file mode 100644 index 000000000000..00268614f045 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml new file mode 100644 index 000000000000..ce9cb6233354 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/.travis.yml @@ -0,0 +1,2 @@ +language: go +go: 1.3.3 diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/LICENSE new file mode 100644 index 000000000000..89b817996558 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md new file mode 100644 index 000000000000..020b8fbf339e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/README.md @@ -0,0 +1,116 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## How To + +We define two functions, `Retry()` and `RetryNotify()`. +They receive an `Operation` to execute, a `BackOff` algorithm, +and an optional `Notify` error handler. + +The operation will be executed, and will be retried on failure with delay +as given by the backoff algorithm. The backoff algorithm can also decide when to stop +retrying. +In addition, the notify error handler will be called after each failed attempt, +except for the last time, whose error should be handled by the caller. + +```go +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +func Retry(Operation, BackOff) error +func RetryNotify(Operation, BackOff, Notify) +``` + +## Examples + +See more advanced examples in the [godoc][advanced example]. + +### Retry + +Simple retry helper that uses the default exponential backoff algorithm: + +```go +operation := func() error { + // An operation that might fail. + return nil // or return errors.New("some error") +} + +err := Retry(operation, NewExponentialBackOff()) +if err != nil { + // Handle error. + return err +} + +// Operation is successful. +return nil +``` + +### Ticker + +```go +operation := func() error { + // An operation that might fail + return nil // or return errors.New("some error") +} + +b := NewExponentialBackOff() +ticker := NewTicker(b) + +var err error + +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +for range ticker.C { + if err = operation(); err != nil { + log.Println(err, "will retry...") + continue + } + + ticker.Stop() + break +} + +if err != nil { + // Operation has failed. + return err +} + +// Operation is successful. +return nil +``` + +## Getting Started + +```bash +# install +$ go get github.com/cenkalti/backoff + +# test +$ cd $GOPATH/src/github.com/cenkalti/backoff +$ go get -t ./... 
+$ go test -v -cover +``` + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png + +[google-http-java-client]: https://github.com/google/google-http-java-client +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/backoff.go new file mode 100644 index 000000000000..61bd6df66c08 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/backoff.go @@ -0,0 +1,59 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Also has a Retry() helper for retrying operations that may fail. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/exponential.go new file mode 100644 index 000000000000..ae65516dc0ef --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/exponential.go @@ -0,0 +1,156 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. 
+ +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + if b.RandomizationFactor < 0 { + b.RandomizationFactor = 0 + } else if b.RandomizationFactor > 1 { + b.RandomizationFactor = 1 + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). 
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow; if overflow is detected, set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// Returns a random value from the following interval:
+// 	[currentInterval - randomizationFactor * currentInterval,
+// 	 currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go
new file mode 100644
index 000000000000..f01f2bbd07ea
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/retry.go
@@ -0,0 +1,46 @@
+package backoff
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy says to stop retrying,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return an error or BackOff stops.
+// o is guaranteed to be run at least once.
+// It is the caller's responsibility to reset b after Retry returns.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleeping.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+	var err error
+	var next time.Duration
+
+	b.Reset()
+	for {
+		if err = operation(); err == nil {
+			return nil
+		}
+
+		if next = b.NextBackOff(); next == Stop {
+			return err
+		}
+
+		if notify != nil {
+			notify(err, next)
+		}
+
+		time.Sleep(next)
+	}
+}
diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/ticker.go
new file mode 100644
index 000000000000..7a5ff4ed1fa9
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/ticker.go
@@ -0,0 +1,79 @@
+package backoff
+
+import (
+	"runtime"
+	"sync"
+	"time"
+)
+
+// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
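+//
+// An illustrative usage sketch (editor's note: doWork is a hypothetical
+// fallible operation, not part of this package):
+//
+// 	ticker := NewTicker(NewExponentialBackOff())
+// 	var err error
+// 	for range ticker.C {
+// 		if err = doWork(); err == nil {
+// 			ticker.Stop()
+// 			break
+// 		}
+// 	}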
+type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send the time at times +// specified by the BackOff argument. Ticker is guaranteed to tick at least once. +// The channel is closed when Stop method is called or BackOff stops. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + stop: make(chan struct{}), + } + go t.run() + runtime.SetFinalizer(t, (*Ticker).Stop) + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + t.b.Reset() + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go new file mode 100644 index 000000000000..5f17a82213f4 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go @@ -0,0 +1,375 @@ +package alertmanager + +import ( + "context" + "fmt" + "net/http" + "net/url" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/alertmanager/api" + "github.com/prometheus/alertmanager/cluster" + "github.com/prometheus/alertmanager/config" + "github.com/prometheus/alertmanager/dispatch" + "github.com/prometheus/alertmanager/inhibit" + "github.com/prometheus/alertmanager/nflog" + "github.com/prometheus/alertmanager/notify" + "github.com/prometheus/alertmanager/notify/email" + "github.com/prometheus/alertmanager/notify/hipchat" + "github.com/prometheus/alertmanager/notify/opsgenie" + "github.com/prometheus/alertmanager/notify/pagerduty" + "github.com/prometheus/alertmanager/notify/pushover" + "github.com/prometheus/alertmanager/notify/slack" + "github.com/prometheus/alertmanager/notify/victorops" + "github.com/prometheus/alertmanager/notify/webhook" + "github.com/prometheus/alertmanager/notify/wechat" + "github.com/prometheus/alertmanager/provider/mem" + "github.com/prometheus/alertmanager/silence" + "github.com/prometheus/alertmanager/template" + "github.com/prometheus/alertmanager/types" + "github.com/prometheus/alertmanager/ui" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/common/route" +) + +const notificationLogMaintenancePeriod = 15 * time.Minute + +// Config configures an Alertmanager. +type Config struct { + UserID string + // Used to persist notification logs and silences on disk. + DataDir string + Logger log.Logger + Peer *cluster.Peer + PeerTimeout time.Duration + Retention time.Duration + ExternalURL *url.URL +} + +// An Alertmanager manages the alerts for one user. 
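+//
+// Lifecycle sketch (editor's note: illustrative, not part of the vendored
+// file): an Alertmanager is created once per tenant, reconfigured via
+// ApplyConfig as that tenant's configuration changes, and shut down with Stop:
+//
+// 	am, err := New(cfg, prometheus.NewRegistry())
+// 	if err != nil {
+// 		return err
+// 	}
+// 	if err := am.ApplyConfig(cfg.UserID, amCfg); err != nil { // amCfg: a parsed *config.Config
+// 		return err
+// 	}
+// 	defer am.Stop()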
+type Alertmanager struct {
+	cfg             *Config
+	api             *api.API
+	logger          log.Logger
+	nflog           *nflog.Log
+	silences        *silence.Silences
+	marker          types.Marker
+	alerts          *mem.Alerts
+	dispatcher      *dispatch.Dispatcher
+	inhibitor       *inhibit.Inhibitor
+	pipelineBuilder *notify.PipelineBuilder
+	stop            chan struct{}
+	wg              sync.WaitGroup
+	mux             *http.ServeMux
+	registry        *prometheus.Registry
+
+	activeMtx sync.Mutex
+	active    bool
+}
+
+var (
+	webReload = make(chan chan error)
+
+	// To work around a bug in the alertmanager, which registers its metrics
+	// with the global default registry instead of the provided input registry,
+	// we define a singleton dispatcher metrics instance that is shared across
+	// all tenants' alertmanagers.
+	// TODO: change this once the vendored alertmanager includes this PR:
+	// https://github.com/prometheus/alertmanager/pull/2200
+	dispatcherMetrics = dispatch.NewDispatcherMetrics(prometheus.NewRegistry())
+)
+
+func init() {
+	go func() {
+		// Since this is not a "normal" Alertmanager which reads its config
+		// from disk, we just accept and ignore web-based reload signals. Config
+		// updates are only applied externally via ApplyConfig().
+		for range webReload {
+		}
+	}()
+}
+
+// New creates a new Alertmanager.
+func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
+	am := &Alertmanager{
+		cfg:       cfg,
+		logger:    log.With(cfg.Logger, "user", cfg.UserID),
+		stop:      make(chan struct{}),
+		active:    false,
+		activeMtx: sync.Mutex{},
+	}
+
+	am.registry = reg
+
+	am.wg.Add(1)
+	nflogID := fmt.Sprintf("nflog:%s", cfg.UserID)
+	var err error
+	am.nflog, err = nflog.New(
+		nflog.WithRetention(cfg.Retention),
+		nflog.WithSnapshot(filepath.Join(cfg.DataDir, nflogID)),
+		nflog.WithMaintenance(notificationLogMaintenancePeriod, am.stop, am.wg.Done),
+		nflog.WithMetrics(am.registry),
+		nflog.WithLogger(log.With(am.logger, "component", "nflog")),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create notification log: %v", err)
+	}
+	if cfg.Peer != nil {
+		c := cfg.Peer.AddState("nfl:"+cfg.UserID, am.nflog, am.registry)
+		am.nflog.SetBroadcast(c.Broadcast)
+	}
+
+	am.marker = types.NewMarker(am.registry)
+
+	silencesID := fmt.Sprintf("silences:%s", cfg.UserID)
+	am.silences, err = silence.New(silence.Options{
+		SnapshotFile: filepath.Join(cfg.DataDir, silencesID),
+		Retention:    cfg.Retention,
+		Logger:       log.With(am.logger, "component", "silences"),
+		Metrics:      am.registry,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create silences: %v", err)
+	}
+	if cfg.Peer != nil {
+		c := cfg.Peer.AddState("sil:"+cfg.UserID, am.silences, am.registry)
+		am.silences.SetBroadcast(c.Broadcast)
+	}
+
+	am.pipelineBuilder = notify.NewPipelineBuilder(am.registry)
+
+	am.wg.Add(1)
+	go func() {
+		am.silences.Maintenance(15*time.Minute, filepath.Join(cfg.DataDir, silencesID), am.stop)
+		am.wg.Done()
+	}()
+
+	am.alerts, err = mem.NewAlerts(context.Background(), am.marker, 30*time.Minute, am.logger)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create alerts: %v", err)
+	}
+
+	am.api, err = api.New(api.Options{
+		Alerts:     am.alerts,
+		Silences:   am.silences,
+		StatusFunc: am.marker.Status,
+		Peer:       cfg.Peer,
+		Logger:     log.With(am.logger, "component", "api"),
+		GroupFunc: func(f1 func(*dispatch.Route) bool, f2 func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[model.Fingerprint][]string) {
+			return am.dispatcher.Groups(f1, f2)
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create api: %v", err)
+	}
+
+	router := route.New().WithPrefix(am.cfg.ExternalURL.Path)
+
+	ui.Register(router, webReload, log.With(am.logger, "component", "ui"))
+	am.mux = am.api.Register(router, am.cfg.ExternalURL.Path)
+
+	return am, nil
+}
+
+// clusterWait returns a function that inspects the current peer state and returns
+// a duration of one base timeout for each peer with a higher ID than ourselves.
+func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration {
+	return func() time.Duration {
+		return time.Duration(p.Position()) * timeout
+	}
+}
+
+// ApplyConfig applies a new configuration to an Alertmanager.
+func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config) error {
+	templateFiles := make([]string, len(conf.Templates))
+	if len(conf.Templates) > 0 {
+		for i, t := range conf.Templates {
+			templateFiles[i] = filepath.Join(am.cfg.DataDir, "templates", userID, t)
+		}
+	}
+
+	tmpl, err := template.FromGlobs(templateFiles...)
+	if err != nil {
+		return err
+	}
+	tmpl.ExternalURL = am.cfg.ExternalURL
+
+	am.api.Update(conf, func(_ model.LabelSet) {})
+
+	// Stop any existing inhibitor before it is recreated below.
+	if am.inhibitor != nil {
+		am.inhibitor.Stop()
+	}
+
+	// Stop any existing dispatcher before it is recreated below.
+	if am.dispatcher != nil {
+		am.dispatcher.Stop()
+	}
+
+	am.inhibitor = inhibit.NewInhibitor(am.alerts, conf.InhibitRules, am.marker, log.With(am.logger, "component", "inhibitor"))
+
+	waitFunc := clusterWait(am.cfg.Peer, am.cfg.PeerTimeout)
+	timeoutFunc := func(d time.Duration) time.Duration {
+		if d < notify.MinTimeout {
+			d = notify.MinTimeout
+		}
+		return d + waitFunc()
+	}
+
+	integrationsMap, err := buildIntegrationsMap(conf.Receivers, tmpl, am.logger)
+	if err != nil {
+		return err
+	}
+
+	pipeline := am.pipelineBuilder.New(
+		integrationsMap,
+		waitFunc,
+		am.inhibitor,
+		silence.NewSilencer(am.silences, am.marker, am.logger),
+		am.nflog,
+		am.cfg.Peer,
+	)
+	am.dispatcher = dispatch.NewDispatcher(
+		am.alerts,
+		dispatch.NewRoute(conf.Route, nil),
+		pipeline,
+		am.marker,
+		timeoutFunc,
+		log.With(am.logger, "component", "dispatcher"),
+		dispatcherMetrics,
+	)
+
+	go am.dispatcher.Run()
+	go am.inhibitor.Run()
+
+	// Mark the alertmanager as active.
+	am.activeMtx.Lock()
+	am.active = true
+	am.activeMtx.Unlock()
+
+	return nil
+}
+
+// IsActive reports whether the alertmanager is currently running
+// (i.e. not paused).
+func (am *Alertmanager) IsActive() bool {
+	am.activeMtx.Lock()
+	defer am.activeMtx.Unlock()
+	return am.active
+}
+
+// Pause stops the restartable jobs in the alertmanager and marks it
+// as inactive.
+func (am *Alertmanager) Pause() {
+	// Set to inactive.
+	am.activeMtx.Lock()
+	am.active = false
+	am.activeMtx.Unlock()
+
+	// Stop the inhibitor and dispatcher, which will be recreated when
+	// a new config is applied.
+	if am.inhibitor != nil {
+		am.inhibitor.Stop()
+		am.inhibitor = nil
+	}
+	if am.dispatcher != nil {
+		am.dispatcher.Stop()
+		am.dispatcher = nil
+	}
+
+	// Remove all of the active silences from the alertmanager.
+	silences, _, err := am.silences.Query()
+	if err != nil {
+		level.Warn(am.logger).Log("msg", "unable to retrieve silences for removal", "err", err)
+	}
+	for _, si := range silences {
+		err = am.silences.Expire(si.Id)
+		if err != nil {
+			level.Warn(am.logger).Log("msg", "unable to remove silence", "err", err, "silence", si.Id)
+		}
+	}
+}
+
+// Stop stops the Alertmanager.
+func (am *Alertmanager) Stop() {
+	if am.inhibitor != nil {
+		am.inhibitor.Stop()
+	}
+
+	if am.dispatcher != nil {
+		am.dispatcher.Stop()
+	}
+
+	am.alerts.Close()
+	close(am.stop)
+	am.wg.Wait()
+}
+
+// buildIntegrationsMap builds a map from receiver name to the list of
+// integration notifiers, from a list of receiver configs.
+func buildIntegrationsMap(nc []*config.Receiver, tmpl *template.Template, logger log.Logger) (map[string][]notify.Integration, error) {
+	integrationsMap := make(map[string][]notify.Integration, len(nc))
+	for _, rcv := range nc {
+		integrations, err := buildReceiverIntegrations(rcv, tmpl, logger)
+		if err != nil {
+			return nil, err
+		}
+		integrationsMap[rcv.Name] = integrations
+	}
+	return integrationsMap, nil
+}
+
+// buildReceiverIntegrations builds a list of integration notifiers from a
+// receiver config.
+// Taken from https://github.com/prometheus/alertmanager/blob/94d875f1227b29abece661db1a68c001122d1da5/cmd/alertmanager/main.go#L112-L159.
+func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, logger log.Logger) ([]notify.Integration, error) {
+	var (
+		errs         types.MultiError
+		integrations []notify.Integration
+		add          = func(name string, i int, rs notify.ResolvedSender, f func(l log.Logger) (notify.Notifier, error)) {
+			n, err := f(log.With(logger, "integration", name))
+			if err != nil {
+				errs.Add(err)
+				return
+			}
+			integrations = append(integrations, notify.NewIntegration(n, rs, name, i))
+		}
+	)
+
+	for i, c := range nc.WebhookConfigs {
+		add("webhook", i, c, func(l log.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l) })
+	}
+	for i, c := range nc.EmailConfigs {
+		add("email", i, c, func(l log.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil })
+	}
+	for i, c := range nc.PagerdutyConfigs {
+		add("pagerduty", i, c, func(l log.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l) })
+	}
+	for i, c := range nc.OpsGenieConfigs {
+		add("opsgenie", i, c, func(l log.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l) })
+	}
+	for i, c := range nc.WechatConfigs {
+		add("wechat", i, c, func(l log.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l) })
+	}
+	for i, c := range nc.SlackConfigs {
+		add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l) })
+	}
+	for i, c := range nc.HipchatConfigs {
+		add("hipchat", i, c, func(l log.Logger) (notify.Notifier, error) { return hipchat.New(c, tmpl, l) })
+	}
+	for i, c := range nc.VictorOpsConfigs {
+		add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l) })
+	}
+	for i, c := range nc.PushoverConfigs {
+		add("pushover", i, c, func(l log.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l) })
+	}
+	if errs.Len() > 0 {
+		return nil, &errs
+	}
+	return integrations, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
new file mode 100644
index 000000000000..721053dcbb4b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go
@@ -0,0 +1,210 @@
+package alertmanager
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// This struct aggregates metrics exported by Alertmanager
+// and re-exports those aggregates as Cortex metrics.
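+//
+// The mechanism is the usual custom-Collector pattern (editor's note:
+// illustrative sketch, not vendored text): every tenant's Alertmanager writes
+// to its own prometheus.Registry, and this collector sums across all of them
+// on scrape:
+//
+// 	m := newAlertmanagerMetrics()
+// 	m.addUserRegistry("tenant-1", tenantReg) // tenantReg: that tenant's registry
+// 	mainReg.MustRegister(m)                  // mainReg re-exports the aggregates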
+type alertmanagerMetrics struct {
+	// Maps userID -> registry
+	regsMu sync.Mutex
+	regs   map[string]*prometheus.Registry
+
+	// exported metrics, gathered from Alertmanager API
+	alertsReceived *prometheus.Desc
+	alertsInvalid  *prometheus.Desc
+
+	// exported metrics, gathered from Alertmanager PipelineBuilder
+	numNotifications           *prometheus.Desc
+	numFailedNotifications     *prometheus.Desc
+	notificationLatencySeconds *prometheus.Desc
+
+	// exported metrics, gathered from Alertmanager nflog
+	nflogGCDuration              *prometheus.Desc
+	nflogSnapshotDuration        *prometheus.Desc
+	nflogSnapshotSize            *prometheus.Desc
+	nflogQueriesTotal            *prometheus.Desc
+	nflogQueryErrorsTotal        *prometheus.Desc
+	nflogQueryDuration           *prometheus.Desc
+	nflogPropagatedMessagesTotal *prometheus.Desc
+
+	// exported metrics, gathered from Alertmanager Marker
+	markerAlerts *prometheus.Desc
+
+	// exported metrics, gathered from Alertmanager Silences
+	silencesGCDuration              *prometheus.Desc
+	silencesSnapshotDuration        *prometheus.Desc
+	silencesSnapshotSize            *prometheus.Desc
+	silencesQueriesTotal            *prometheus.Desc
+	silencesQueryErrorsTotal        *prometheus.Desc
+	silencesQueryDuration           *prometheus.Desc
+	silences                        *prometheus.Desc
+	silencesPropagatedMessagesTotal *prometheus.Desc
+}
+
+func newAlertmanagerMetrics() *alertmanagerMetrics {
+	return &alertmanagerMetrics{
+		regs:   map[string]*prometheus.Registry{},
+		regsMu: sync.Mutex{},
+		alertsReceived: prometheus.NewDesc(
+			"cortex_alertmanager_alerts_received_total",
+			"The total number of received alerts.",
+			[]string{"user"}, nil),
+		alertsInvalid: prometheus.NewDesc(
+			"cortex_alertmanager_alerts_invalid_total",
+			"The total number of received alerts that were invalid.",
+			[]string{"user"}, nil),
+		numNotifications: prometheus.NewDesc(
+			"cortex_alertmanager_notifications_total",
+			"The total number of attempted notifications.",
+			[]string{"user"}, nil),
+		numFailedNotifications: prometheus.NewDesc(
+			"cortex_alertmanager_notifications_failed_total",
+			"The total number of failed notifications.",
+			[]string{"user"}, nil),
+		notificationLatencySeconds: prometheus.NewDesc(
+			"cortex_alertmanager_notification_latency_seconds",
+			"The latency of notifications in seconds.",
+			nil, nil),
+		nflogGCDuration: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_gc_duration_seconds",
+			"Duration of the last notification log garbage collection cycle.",
+			nil, nil),
+		nflogSnapshotDuration: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_snapshot_duration_seconds",
+			"Duration of the last notification log snapshot.",
+			nil, nil),
+		nflogSnapshotSize: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_snapshot_size_bytes",
+			"Size of the last notification log snapshot in bytes.",
+			nil, nil),
+		nflogQueriesTotal: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_queries_total",
+			"Total number of notification log queries received.",
+			nil, nil),
+		nflogQueryErrorsTotal: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_query_errors_total",
+			"Total number of notification log queries that failed.",
+			nil, nil),
+		nflogQueryDuration: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_query_duration_seconds",
+			"Duration of notification log query evaluation.",
+			nil, nil),
+		nflogPropagatedMessagesTotal: prometheus.NewDesc(
+			"cortex_alertmanager_nflog_gossip_messages_propagated_total",
+			"Number of received gossip messages that have been further gossiped.",
+			nil, nil),
+		markerAlerts: prometheus.NewDesc(
+			"cortex_alertmanager_alerts",
+			"How many alerts by state.",
+			[]string{"user", "state"}, nil),
+		silencesGCDuration: prometheus.NewDesc(
+			"cortex_alertmanager_silences_gc_duration_seconds",
+			"Duration of the last silence garbage collection cycle.",
+			nil, nil),
+		silencesSnapshotDuration: prometheus.NewDesc(
+			"cortex_alertmanager_silences_snapshot_duration_seconds",
+			"Duration of the last silence snapshot.",
+			nil, nil),
+		silencesSnapshotSize: prometheus.NewDesc(
+			"cortex_alertmanager_silences_snapshot_size_bytes",
+			"Size of the last silence snapshot in bytes.",
+			nil, nil),
+		silencesQueriesTotal: prometheus.NewDesc(
+			"cortex_alertmanager_silences_queries_total",
+			"How many silence queries were received.",
+			nil, nil),
+		silencesQueryErrorsTotal: prometheus.NewDesc(
+			"cortex_alertmanager_silences_query_errors_total",
+			"How many silence queries did not succeed.",
+			nil, nil),
+		silencesQueryDuration: prometheus.NewDesc(
+			"cortex_alertmanager_silences_query_duration_seconds",
+			"Duration of silence query evaluation.",
+			nil, nil),
+		silencesPropagatedMessagesTotal: prometheus.NewDesc(
+			"cortex_alertmanager_silences_gossip_messages_propagated_total",
+			"Number of received gossip messages that have been further gossiped.",
+			nil, nil),
+		silences: prometheus.NewDesc(
+			"cortex_alertmanager_silences",
+			"How many silences by state.",
+			[]string{"user", "state"}, nil),
+	}
+}
+
+func (m *alertmanagerMetrics) addUserRegistry(user string, reg *prometheus.Registry) {
+	m.regsMu.Lock()
+	m.regs[user] = reg
+	m.regsMu.Unlock()
+}
+
+func (m *alertmanagerMetrics) registries() map[string]*prometheus.Registry {
+	regs := map[string]*prometheus.Registry{}
+
+	m.regsMu.Lock()
+	defer m.regsMu.Unlock()
+	for uid, r := range m.regs {
+		regs[uid] = r
+	}
+
+	return regs
+}
+
+func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) {
+	out <- m.alertsReceived
+	out <- m.alertsInvalid
+	out <- m.numNotifications
+	out <- m.numFailedNotifications
+	out <- m.notificationLatencySeconds
+	out <- m.nflogGCDuration
+	out <- m.nflogSnapshotDuration
+	out <- m.nflogSnapshotSize
+	out <- m.nflogQueriesTotal
+	out <- m.nflogQueryErrorsTotal
+	out <- m.nflogQueryDuration
+	out <- m.nflogPropagatedMessagesTotal
+	out <- m.markerAlerts
+	out <- m.silencesGCDuration
+	out <- m.silencesSnapshotDuration
+	out <- m.silencesSnapshotSize
+	out <- m.silencesQueriesTotal
+	out <- m.silencesQueryErrorsTotal
+	out <- m.silencesQueryDuration
+	out <- m.silences
+	out <- m.silencesPropagatedMessagesTotal
+}
+
+func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) {
+	data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.registries())
+
+	data.SendSumOfCountersPerUser(out, m.alertsReceived, "alertmanager_alerts_received_total")
+	data.SendSumOfCountersPerUser(out, m.alertsInvalid, "alertmanager_alerts_invalid_total")
+
+	data.SendSumOfCountersPerUser(out, m.numNotifications, "alertmanager_notifications_total")
+	data.SendSumOfCountersPerUser(out, m.numFailedNotifications, "alertmanager_notifications_failed_total")
+	data.SendSumOfHistograms(out, m.notificationLatencySeconds, "alertmanager_notification_latency_seconds")
+	data.SendSumOfGaugesPerUserWithLabels(out, m.markerAlerts, "alertmanager_alerts", "state")
+
+	data.SendSumOfSummaries(out, m.nflogGCDuration, "alertmanager_nflog_gc_duration_seconds")
+	data.SendSumOfSummaries(out, m.nflogSnapshotDuration, "alertmanager_nflog_snapshot_duration_seconds")
+	data.SendSumOfGauges(out, m.nflogSnapshotSize, "alertmanager_nflog_snapshot_size_bytes")
+	data.SendSumOfCounters(out, m.nflogQueriesTotal,
"alertmanager_nflog_queries_total") + data.SendSumOfCounters(out, m.nflogQueryErrorsTotal, "alertmanager_nflog_query_errors_total") + data.SendSumOfHistograms(out, m.nflogQueryDuration, "alertmanager_nflog_query_duration_seconds") + data.SendSumOfCounters(out, m.nflogPropagatedMessagesTotal, "alertmanager_nflog_gossip_messages_propagated_total") + + data.SendSumOfSummaries(out, m.silencesGCDuration, "alertmanager_silences_gc_duration_seconds") + data.SendSumOfSummaries(out, m.silencesSnapshotDuration, "alertmanager_silences_snapshot_duration_seconds") + data.SendSumOfGauges(out, m.silencesSnapshotSize, "alertmanager_silences_snapshot_size_bytes") + data.SendSumOfCounters(out, m.silencesQueriesTotal, "alertmanager_silences_queries_total") + data.SendSumOfCounters(out, m.silencesQueryErrorsTotal, "alertmanager_silences_query_errors_total") + data.SendSumOfHistograms(out, m.silencesQueryDuration, "alertmanager_silences_query_duration_seconds") + data.SendSumOfCounters(out, m.silencesPropagatedMessagesTotal, "alertmanager_silences_gossip_messages_propagated_total") + data.SendSumOfGaugesPerUserWithLabels(out, m.silences, "alertmanager_silences", "state") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/alerts.pb.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/alerts.pb.go new file mode 100644 index 000000000000..3c4d815b8a3b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/alerts.pb.go @@ -0,0 +1,818 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: alerts.proto + +package alerts + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type AlertConfigDesc struct { + User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + RawConfig string `protobuf:"bytes,2,opt,name=raw_config,json=rawConfig,proto3" json:"raw_config,omitempty"` + Templates []*TemplateDesc `protobuf:"bytes,3,rep,name=templates,proto3" json:"templates,omitempty"` +} + +func (m *AlertConfigDesc) Reset() { *m = AlertConfigDesc{} } +func (*AlertConfigDesc) ProtoMessage() {} +func (*AlertConfigDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_20493709c38b81dc, []int{0} +} +func (m *AlertConfigDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlertConfigDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AlertConfigDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AlertConfigDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertConfigDesc.Merge(m, src) +} +func (m *AlertConfigDesc) XXX_Size() int { + return m.Size() +} +func (m *AlertConfigDesc) XXX_DiscardUnknown() { + xxx_messageInfo_AlertConfigDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertConfigDesc proto.InternalMessageInfo + +func (m *AlertConfigDesc) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AlertConfigDesc) GetRawConfig() string { + if m != nil { + return m.RawConfig + } + return "" +} + +func (m *AlertConfigDesc) GetTemplates() []*TemplateDesc { + if m != nil { + return m.Templates + } + return nil +} + +type TemplateDesc struct { + Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` + Body string `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (m *TemplateDesc) Reset() { *m = TemplateDesc{} } +func (*TemplateDesc) ProtoMessage() {} +func (*TemplateDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_20493709c38b81dc, []int{1} +} +func (m *TemplateDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TemplateDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TemplateDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateDesc.Merge(m, src) +} +func (m *TemplateDesc) XXX_Size() int { + return m.Size() +} +func (m *TemplateDesc) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateDesc proto.InternalMessageInfo + +func (m *TemplateDesc) GetFilename() string { + if m != nil { + return m.Filename + } + return "" +} + +func (m *TemplateDesc) GetBody() string { + if m != nil { + return m.Body + } + return "" +} + +func init() { + proto.RegisterType((*AlertConfigDesc)(nil), "alerts.AlertConfigDesc") + proto.RegisterType((*TemplateDesc)(nil), "alerts.TemplateDesc") +} + +func init() { proto.RegisterFile("alerts.proto", fileDescriptor_20493709c38b81dc) } + +var fileDescriptor_20493709c38b81dc = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xcc, 0x49, 0x2d, + 0x2a, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x83, 0xf0, 0xa4, 0x74, 0xd3, 0x33, + 
0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0xd3, 0xf3, 0xf5, 0xc1, 0xd2, + 0x49, 0xa5, 0x69, 0x60, 0x1e, 0x98, 0x03, 0x66, 0x41, 0xb4, 0x29, 0x55, 0x70, 0xf1, 0x3b, 0x82, + 0x34, 0x3a, 0xe7, 0xe7, 0xa5, 0x65, 0xa6, 0xbb, 0xa4, 0x16, 0x27, 0x0b, 0x09, 0x71, 0xb1, 0x94, + 0x16, 0xa7, 0x16, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0xb2, 0x5c, 0x5c, + 0x45, 0x89, 0xe5, 0xf1, 0xc9, 0x60, 0x55, 0x12, 0x4c, 0x60, 0x19, 0xce, 0xa2, 0xc4, 0x72, 0x88, + 0x36, 0x21, 0x23, 0x2e, 0xce, 0x92, 0xd4, 0xdc, 0x82, 0x9c, 0xc4, 0x92, 0xd4, 0x62, 0x09, 0x66, + 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x11, 0x3d, 0xa8, 0xf3, 0x42, 0xa0, 0x12, 0x20, 0xb3, 0x83, 0x10, + 0xca, 0x94, 0xec, 0xb8, 0x78, 0x90, 0xa5, 0x84, 0xa4, 0xb8, 0x38, 0xd2, 0x32, 0x73, 0x52, 0xf3, + 0x12, 0x73, 0x53, 0xa1, 0x56, 0xc3, 0xf9, 0x20, 0x27, 0x25, 0xe5, 0xa7, 0x54, 0x42, 0x2d, 0x06, + 0xb3, 0x9d, 0x4c, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, + 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, + 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xbd, 0x6d, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x90, 0x9e, 0xed, 0x3d, 0x01, 0x00, 0x00, +} + +func (this *AlertConfigDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AlertConfigDesc) + if !ok { + that2, ok := that.(AlertConfigDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.User != that1.User { + return false + } + if this.RawConfig != that1.RawConfig { + return false + } + if len(this.Templates) != len(that1.Templates) { + return false + } + for i := range this.Templates { + if !this.Templates[i].Equal(that1.Templates[i]) { + return false + } + } + return true +} +func (this *TemplateDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TemplateDesc) + if !ok { + that2, ok := that.(TemplateDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Filename != that1.Filename { + return false + } + if this.Body != that1.Body { + return false + } + return true +} +func (this *AlertConfigDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&alerts.AlertConfigDesc{") + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "RawConfig: "+fmt.Sprintf("%#v", this.RawConfig)+",\n") + if this.Templates != nil { + s = append(s, "Templates: "+fmt.Sprintf("%#v", this.Templates)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TemplateDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&alerts.TemplateDesc{") + s = append(s, "Filename: "+fmt.Sprintf("%#v", this.Filename)+",\n") + s = append(s, "Body: "+fmt.Sprintf("%#v", this.Body)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAlerts(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *AlertConfigDesc) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlertConfigDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlertConfigDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Templates) > 0 { + for iNdEx := len(m.Templates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Templates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAlerts(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.RawConfig) > 0 { + i -= len(m.RawConfig) + copy(dAtA[i:], m.RawConfig) + i = encodeVarintAlerts(dAtA, i, uint64(len(m.RawConfig))) + i-- + dAtA[i] = 0x12 + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintAlerts(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TemplateDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Body) > 0 { + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarintAlerts(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x12 + } + if len(m.Filename) > 0 { + i -= len(m.Filename) + copy(dAtA[i:], m.Filename) + i = encodeVarintAlerts(dAtA, i, uint64(len(m.Filename))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAlerts(dAtA []byte, offset int, v uint64) int { + offset -= sovAlerts(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AlertConfigDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + l = len(m.RawConfig) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + if len(m.Templates) > 0 { + for _, e := range m.Templates { + l = e.Size() + n += 1 + l + sovAlerts(uint64(l)) + } + } + return n +} + +func (m *TemplateDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Filename) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + l = len(m.Body) + if l > 0 { + n += 1 + l + sovAlerts(uint64(l)) + } + return n +} + +func sovAlerts(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAlerts(x uint64) (n int) { + return sovAlerts(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AlertConfigDesc) String() string { + if this == nil { + return "nil" + } + repeatedStringForTemplates := "[]*TemplateDesc{" + for _, f := range this.Templates { + repeatedStringForTemplates += strings.Replace(f.String(), "TemplateDesc", "TemplateDesc", 1) + "," + } + repeatedStringForTemplates += "}" + s := strings.Join([]string{`&AlertConfigDesc{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `RawConfig:` + fmt.Sprintf("%v", this.RawConfig) + `,`, + `Templates:` + repeatedStringForTemplates + `,`, + `}`, + }, "") + return s +} +func (this *TemplateDesc) String() string { + if this == nil { + 
return "nil" + } + s := strings.Join([]string{`&TemplateDesc{`, + `Filename:` + fmt.Sprintf("%v", this.Filename) + `,`, + `Body:` + fmt.Sprintf("%v", this.Body) + `,`, + `}`, + }, "") + return s +} +func valueToStringAlerts(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AlertConfigDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlertConfigDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlertConfigDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RawConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RawConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Templates = append(m.Templates, &TemplateDesc{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAlerts(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *TemplateDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAlerts + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAlerts + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAlerts + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Body = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAlerts(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAlerts + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAlerts(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAlerts + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAlerts + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthAlerts + } + return iNdEx, nil + 
+	case 3:
+		for {
+			var innerWire uint64
+			var start int = iNdEx
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAlerts
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				innerWire |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			innerWireType := int(innerWire & 0x7)
+			if innerWireType == 4 {
+				break
+			}
+			next, err := skipAlerts(dAtA[start:])
+			if err != nil {
+				return 0, err
+			}
+			iNdEx = start + next
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthAlerts
+			}
+		}
+		return iNdEx, nil
+	case 4:
+		return iNdEx, nil
+	case 5:
+		iNdEx += 4
+		return iNdEx, nil
+	default:
+		return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+	}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthAlerts = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowAlerts   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/alerts.proto b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/alerts.proto
new file mode 100644
index 000000000000..8626ec981c56
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/alerts.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package alerts;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+message AlertConfigDesc {
+  string user = 1;
+  string raw_config = 2;
+
+  repeated TemplateDesc templates = 3;
+}
+
+message TemplateDesc {
+  string filename = 1;
+  string body = 2;
+}
\ No newline at end of file
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go
new file mode 100644
index 000000000000..92dd964d9af9
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb/store.go
@@ -0,0 +1,61 @@
+package configdb
+
+import (
+	"context"
+
+	"github.com/cortexproject/cortex/pkg/configs/userconfig"
+
+	"github.com/cortexproject/cortex/pkg/alertmanager/alerts"
+	"github.com/cortexproject/cortex/pkg/configs/client"
+)
+
+// Store is a concrete implementation of AlertStore that sources alertmanager
+// configurations from the config service.
+type Store struct {
+	configClient client.Client
+	since        userconfig.ID
+	alertConfigs map[string]alerts.AlertConfigDesc
+}
+
+// NewStore constructs a Store.
+func NewStore(c client.Client) *Store {
+	return &Store{
+		configClient: c,
+		since:        0,
+		alertConfigs: make(map[string]alerts.AlertConfigDesc),
+	}
+}
+
+// ListAlertConfigs implements AlertStore.
+func (c *Store) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) {
+	configs, err := c.configClient.GetAlerts(ctx, c.since)
+	if err != nil {
+		return nil, err
+	}
+
+	for user, cfg := range configs.Configs {
+		if cfg.IsDeleted() {
+			delete(c.alertConfigs, user)
+			continue
+		}
+
+		var templates []*alerts.TemplateDesc
+		for fn, template := range cfg.Config.TemplateFiles {
+			templates = append(templates, &alerts.TemplateDesc{
+				Filename: fn,
+				Body:     template,
+			})
+		}
+
+		c.alertConfigs[user] = alerts.AlertConfigDesc{
+			User:      user,
+			RawConfig: cfg.Config.AlertmanagerConfig,
+			Templates: templates,
+		}
+	}
+
+	c.since = configs.GetLatestConfigID()
+
+	return c.alertConfigs, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go
new file mode 100644
index 000000000000..df8010e6d5cf
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/local/store.go
@@ -0,0 +1,78 @@
+package local
+
+import (
+	"context"
+	"flag"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/alertmanager/config"
+
+	"github.com/cortexproject/cortex/pkg/alertmanager/alerts"
+)
+
+// StoreConfig configures a static file alertmanager store.
+type StoreConfig struct {
+	Path string `yaml:"path"`
+}
+
+// RegisterFlags registers flags related to the alertmanager file store.
+func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) {
+	f.StringVar(&cfg.Path, "alertmanager.storage.local.path", "", "Path at which alertmanager configurations are stored.")
+}
+
+// Store is used to load user alertmanager configs from local disk.
+type Store struct {
+	cfg StoreConfig
+}
+
+// NewStore returns a new file alert store.
+func NewStore(cfg StoreConfig) (*Store, error) {
+	return &Store{cfg}, nil
+}
+
+// ListAlertConfigs returns each user's alertmanager config, keyed by user ID.
+func (f *Store) ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) {
+	configs := map[string]alerts.AlertConfigDesc{}
+	err := filepath.Walk(f.cfg.Path, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return errors.Wrap(err, "unable to walk file path")
+		}
+
+		// Ignore directories and non-YAML files.
+		ext := filepath.Ext(info.Name())
+		if info.IsDir() || (ext != ".yml" && ext != ".yaml") {
+			return nil
+		}
+
+		// Ensure the file is a valid Alertmanager config.
+		_, err = config.LoadFile(path)
+		if err != nil {
+			return errors.Wrap(err, "unable to load file "+path)
+		}
+
+		// Load the file to be returned by the store.
+		content, err := ioutil.ReadFile(path)
+		if err != nil {
+			return errors.Wrap(err, "unable to read file "+path)
+		}
+
+		// The file name must correspond to the user tenant ID.
+		user := strings.TrimSuffix(info.Name(), ext)
+
+		configs[user] = alerts.AlertConfigDesc{
+			User:      user,
+			RawConfig: string(content),
+		}
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return configs, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
new file mode 100644
index 000000000000..4daad12f73b6
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
@@ -0,0 +1,509 @@
+package alertmanager
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"html/template"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/alertmanager/cluster"
+	amconfig "github.com/prometheus/alertmanager/config"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/weaveworks/common/user"
+
+	"github.com/cortexproject/cortex/pkg/alertmanager/alerts"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/flagext"
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+var backoffConfig = util.BackoffConfig{
+	// Backoff for loading initial configuration set.
+	MinBackoff: 100 * time.Millisecond,
+	MaxBackoff: 2 * time.Second,
+}
+
+const (
+	// If a config sets the webhook URL to this, it will be rewritten to
+	// a URL derived from Config.AutoWebhookRoot
+	autoWebhookURL = "http://internal.monitor"
+
+	configStatusValid   = "valid"
+	configStatusInvalid = "invalid"
+
+	statusPage = `
+<!doctype html>
+<html>
+	<head><title>Cortex Alertmanager Status</title></head>
+	<body>
+		<h1>Cortex Alertmanager Status</h1>
+		<h2>Node</h2>
+		<dl>
+			<dt>Name</dt><dd>{{.self.Name}}</dd>
+			<dt>Addr</dt><dd>{{.self.Addr}}</dd>
+			<dt>Port</dt><dd>{{.self.Port}}</dd>
+		</dl>
+		<h3>Members</h3>
+		{{ with .members }}
+		<table>
+		<tr><th>Name</th><th>Addr</th></tr>
+		{{ range . }}
+		<tr><td>{{ .Name }}</td><td>{{ .Addr }}</td></tr>
+		{{ end }}
+		</table>
+		{{ else }}
+		<p>No peers</p>
+		{{ end }}
+	</body>
+</html>
+`
+)
+
+var (
+	statusTemplate *template.Template
+)
+
+func init() {
+	statusTemplate = template.Must(template.New("statusPage").Funcs(map[string]interface{}{
+		"state": func(enabled bool) string {
+			if enabled {
+				return "enabled"
+			}
+			return "disabled"
+		},
+	}).Parse(statusPage))
+}
+
+// MultitenantAlertmanagerConfig is the configuration for a multitenant Alertmanager.
+type MultitenantAlertmanagerConfig struct {
+	DataDir      string           `yaml:"data_dir"`
+	Retention    time.Duration    `yaml:"retention"`
+	ExternalURL  flagext.URLValue `yaml:"external_url"`
+	PollInterval time.Duration    `yaml:"poll_interval"`
+
+	ClusterBindAddr      string              `yaml:"cluster_bind_address"`
+	ClusterAdvertiseAddr string              `yaml:"cluster_advertise_address"`
+	Peers                flagext.StringSlice `yaml:"peers"`
+	PeerTimeout          time.Duration       `yaml:"peer_timeout"`
+
+	FallbackConfigFile string `yaml:"fallback_config_file"`
+	AutoWebhookRoot    string `yaml:"auto_webhook_root"`
+
+	Store AlertStoreConfig `yaml:"storage"`
+}
+
+const defaultClusterAddr = "0.0.0.0:9094"
+
+// RegisterFlags adds the flags required to config this to the given FlagSet.
+func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) {
+	f.StringVar(&cfg.DataDir, "alertmanager.storage.path", "data/", "Base path for data storage.")
+	f.DurationVar(&cfg.Retention, "alertmanager.storage.retention", 5*24*time.Hour, "How long to keep data for.")
+
+	f.Var(&cfg.ExternalURL, "alertmanager.web.external-url", "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, relevant URL components will be derived automatically.")
+
+	f.StringVar(&cfg.FallbackConfigFile, "alertmanager.configs.fallback", "", "Filename of fallback config to use if none specified for instance.")
+	f.StringVar(&cfg.AutoWebhookRoot, "alertmanager.configs.auto-webhook-root", "", "Root of URL to generate if config is "+autoWebhookURL)
+	f.DurationVar(&cfg.PollInterval, "alertmanager.configs.poll-interval", 15*time.Second, "How frequently to poll Cortex configs")
+
+	f.StringVar(&cfg.ClusterBindAddr, "cluster.listen-address", defaultClusterAddr, "Listen address for cluster.")
+	f.StringVar(&cfg.ClusterAdvertiseAddr, "cluster.advertise-address", "", "Explicit address to advertise in cluster.")
+	f.Var(&cfg.Peers, "cluster.peer", "Initial peers (may be repeated).")
+	f.DurationVar(&cfg.PeerTimeout, "cluster.peer-timeout", time.Second*15, "Time to wait between peers to send notifications.")
+
+	cfg.Store.RegisterFlags(f)
+}
+
+type multitenantAlertmanagerMetrics struct {
+	totalConfigs *prometheus.GaugeVec
+}
+
+func newMultitenantAlertmanagerMetrics(reg prometheus.Registerer) *multitenantAlertmanagerMetrics {
+	m := &multitenantAlertmanagerMetrics{}
+
+	m.totalConfigs = promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "cortex",
+		Name:      "alertmanager_configs",
+		Help:      "How many configs the multitenant alertmanager knows about.",
+	}, []string{"status"})
+	m.totalConfigs.WithLabelValues(configStatusInvalid).Set(0)
+	m.totalConfigs.WithLabelValues(configStatusValid).Set(0)
+
+	return m
+}
+
+// A MultitenantAlertmanager manages Alertmanager instances for multiple
+// organizations.
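+//
+// Operationally (editor's note: a summary of the code below, not vendored
+// text): it polls the alert store every PollInterval; for each AlertConfigDesc
+// returned it creates or reconfigures that tenant's Alertmanager, and tenants
+// whose configs disappear are paused rather than destroyed so that their
+// metrics registries are retained.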
+type MultitenantAlertmanager struct { + services.Service + + cfg *MultitenantAlertmanagerConfig + + store AlertStore + + // The fallback config is stored as a string and parsed every time it's needed + // because we mutate the parsed results and don't want those changes to take + // effect here. + fallbackConfig string + + // All the organization configurations that we have. Only used for instrumentation. + cfgs map[string]alerts.AlertConfigDesc + + alertmanagersMtx sync.Mutex + alertmanagers map[string]*Alertmanager + + logger log.Logger + alertmanagerMetrics *alertmanagerMetrics + multitenantMetrics *multitenantAlertmanagerMetrics + + peer *cluster.Peer +} + +// NewMultitenantAlertmanager creates a new MultitenantAlertmanager. +func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, logger log.Logger, registerer prometheus.Registerer) (*MultitenantAlertmanager, error) { + err := os.MkdirAll(cfg.DataDir, 0777) + if err != nil { + return nil, fmt.Errorf("unable to create Alertmanager data directory %q: %s", cfg.DataDir, err) + } + + var fallbackConfig []byte + if cfg.FallbackConfigFile != "" { + fallbackConfig, err = ioutil.ReadFile(cfg.FallbackConfigFile) + if err != nil { + return nil, fmt.Errorf("unable to read fallback config %q: %s", cfg.FallbackConfigFile, err) + } + _, err = amconfig.LoadFile(cfg.FallbackConfigFile) + if err != nil { + return nil, fmt.Errorf("unable to load fallback config %q: %s", cfg.FallbackConfigFile, err) + } + } + + var peer *cluster.Peer + if cfg.ClusterBindAddr != "" { + peer, err = cluster.Create( + log.With(logger, "component", "cluster"), + registerer, + cfg.ClusterBindAddr, + cfg.ClusterAdvertiseAddr, + cfg.Peers, + true, + cluster.DefaultPushPullInterval, + cluster.DefaultGossipInterval, + cluster.DefaultTcpTimeout, + cluster.DefaultProbeTimeout, + cluster.DefaultProbeInterval, + ) + if err != nil { + return nil, errors.Wrap(err, "unable to initialize gossip mesh") + } + err = peer.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout) + if err != nil { + level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err) + } + go peer.Settle(context.Background(), cluster.DefaultGossipInterval) + } + + store, err := NewAlertStore(cfg.Store) + if err != nil { + return nil, err + } + + return createMultitenantAlertmanager(cfg, fallbackConfig, peer, store, logger, registerer), nil +} + +func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackConfig []byte, peer *cluster.Peer, store AlertStore, logger log.Logger, registerer prometheus.Registerer) *MultitenantAlertmanager { + am := &MultitenantAlertmanager{ + cfg: cfg, + fallbackConfig: string(fallbackConfig), + cfgs: map[string]alerts.AlertConfigDesc{}, + alertmanagers: map[string]*Alertmanager{}, + alertmanagerMetrics: newAlertmanagerMetrics(), + multitenantMetrics: newMultitenantAlertmanagerMetrics(registerer), + peer: peer, + store: store, + logger: log.With(logger, "component", "MultiTenantAlertmanager"), + } + + if registerer != nil { + registerer.MustRegister(am.alertmanagerMetrics) + } + + am.Service = services.NewTimerService(am.cfg.PollInterval, am.starting, am.iteration, am.stopping) + return am +} + +func (am *MultitenantAlertmanager) starting(ctx context.Context) error { + // Load initial set of all configurations before polling for new ones. 
+ am.syncConfigs(am.loadAllConfigs()) + return nil +} + +func (am *MultitenantAlertmanager) iteration(ctx context.Context) error { + err := am.updateConfigs() + if err != nil { + level.Warn(am.logger).Log("msg", "error updating configs", "err", err) + } + // Returning error here would stop "MultitenantAlertmanager" service completely, + // so we return nil to keep service running. + return nil +} + +// stopping runs when MultitenantAlertmanager transitions to Stopping state. +func (am *MultitenantAlertmanager) stopping(_ error) error { + am.alertmanagersMtx.Lock() + for _, am := range am.alertmanagers { + am.Stop() + } + am.alertmanagersMtx.Unlock() + err := am.peer.Leave(am.cfg.PeerTimeout) + if err != nil { + level.Warn(am.logger).Log("msg", "failed to leave the cluster", "err", err) + } + level.Debug(am.logger).Log("msg", "stopping") + return nil +} + +// Load the full set of configurations from the alert store, retrying with backoff +// until we can get them. +func (am *MultitenantAlertmanager) loadAllConfigs() map[string]alerts.AlertConfigDesc { + backoff := util.NewBackoff(context.Background(), backoffConfig) + for { + cfgs, err := am.poll() + if err == nil { + level.Debug(am.logger).Log("msg", "initial configuration load", "num_configs", len(cfgs)) + return cfgs + } + level.Warn(am.logger).Log("msg", "error fetching all configurations, backing off", "err", err) + backoff.Wait() + } +} + +func (am *MultitenantAlertmanager) updateConfigs() error { + cfgs, err := am.poll() + if err != nil { + return err + } + am.syncConfigs(cfgs) + return nil +} + +// poll the alert store. Not re-entrant. +func (am *MultitenantAlertmanager) poll() (map[string]alerts.AlertConfigDesc, error) { + cfgs, err := am.store.ListAlertConfigs(context.Background()) + if err != nil { + return nil, err + } + return cfgs, nil +} + +func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfigDesc) { + invalid := 0 // Count the number of invalid configs as we go. + + level.Debug(am.logger).Log("msg", "adding configurations", "num_configs", len(cfgs)) + for _, cfg := range cfgs { + err := am.setConfig(cfg) + if err != nil { + invalid++ + level.Warn(am.logger).Log("msg", "error applying config", "err", err) + } + } + + am.alertmanagersMtx.Lock() + defer am.alertmanagersMtx.Unlock() + for user, userAM := range am.alertmanagers { + if _, exists := cfgs[user]; !exists { + // The user alertmanager is only paused in order to retain the prometheus metrics + // it has reported to its registry. If a new config for this user appears, this structure + // will be reused. 
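+ // Pausing (rather than stopping) keeps the per-tenant registry alive, so
+ // its metrics survive a configuration being removed and later re-added.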
+ level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", user) + userAM.Pause() + delete(am.cfgs, user) + level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", user) + } + } + am.multitenantMetrics.totalConfigs.WithLabelValues(configStatusInvalid).Set(float64(invalid)) + am.multitenantMetrics.totalConfigs.WithLabelValues(configStatusValid).Set(float64(len(am.cfgs) - invalid)) +} + +func (am *MultitenantAlertmanager) transformConfig(userID string, amConfig *amconfig.Config) (*amconfig.Config, error) { + if amConfig == nil { // shouldn't happen, but check just in case + return nil, fmt.Errorf("no usable Cortex configuration for %v", userID) + } + if am.cfg.AutoWebhookRoot != "" { + for _, r := range amConfig.Receivers { + for _, w := range r.WebhookConfigs { + if w.URL.String() == autoWebhookURL { + u, err := url.Parse(am.cfg.AutoWebhookRoot + "/" + userID + "/monitor") + if err != nil { + return nil, err + } + w.URL = &amconfig.URL{URL: u} + } + } + } + } + + return amConfig, nil +} + +func (am *MultitenantAlertmanager) createTemplatesFile(userID, fn, content string) (bool, error) { + dir := filepath.Join(am.cfg.DataDir, "templates", userID, filepath.Dir(fn)) + err := os.MkdirAll(dir, 0755) + if err != nil { + return false, fmt.Errorf("unable to create Alertmanager templates directory %q: %s", dir, err) + } + + file := filepath.Join(dir, fn) + // Check if the template file already exists and if it has changed + if tmpl, err := ioutil.ReadFile(file); err == nil && string(tmpl) == content { + return false, nil + } + + if err := ioutil.WriteFile(file, []byte(content), 0644); err != nil { + return false, fmt.Errorf("unable to create Alertmanager template file %q: %s", file, err) + } + + return true, nil +} + +// setConfig applies the given configuration to the alertmanager for `userID`, +// creating an alertmanager if it doesn't already exist. +func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { + am.alertmanagersMtx.Lock() + existing, hasExisting := am.alertmanagers[cfg.User] + am.alertmanagersMtx.Unlock() + var userAmConfig *amconfig.Config + var err error + var hasTemplateChanges bool + + for _, tmpl := range cfg.Templates { + hasChanged, err := am.createTemplatesFile(cfg.User, tmpl.Filename, tmpl.Body) + if err != nil { + return err + } + + if hasChanged { + hasTemplateChanges = true + } + } + + level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User) + + if cfg.RawConfig == "" { + if am.fallbackConfig == "" { + return fmt.Errorf("blank Alertmanager configuration for %v", cfg.User) + } + level.Info(am.logger).Log("msg", "blank Alertmanager configuration; using fallback", "user_id", cfg.User) + userAmConfig, err = amconfig.Load(am.fallbackConfig) + if err != nil { + return fmt.Errorf("unable to load fallback configuration for %v: %v", cfg.User, err) + } + } else { + userAmConfig, err = amconfig.Load(cfg.RawConfig) + if err != nil && hasExisting { + // XXX: This means that if a user has a working configuration and + // they submit a broken one, we'll keep processing the last known + // working configuration, and they'll never know. + // TODO: Provide a way of communicating this to the user and for removing + // Alertmanager instances. + return fmt.Errorf("invalid Cortex configuration for %v: %v", cfg.User, err) + } + } + + if userAmConfig, err = am.transformConfig(cfg.User, userAmConfig); err != nil { + return err + } + + // If no Alertmanager instance exists for this user yet, start one. 
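+ // Otherwise the configuration is applied to the existing instance below,
+ // but only if the raw config or one of its templates actually changed.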
+ if !hasExisting { + level.Debug(am.logger).Log("msg", "initializing new per-tenant alertmanager", "user", cfg.User) + newAM, err := am.newAlertmanager(cfg.User, userAmConfig) + if err != nil { + return err + } + am.alertmanagersMtx.Lock() + am.alertmanagers[cfg.User] = newAM + am.alertmanagersMtx.Unlock() + } else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges { + level.Info(am.logger).Log("msg", "updating new per-tenant alertmanager", "user", cfg.User) + // If the config changed, apply the new one. + err := existing.ApplyConfig(cfg.User, userAmConfig) + if err != nil { + return fmt.Errorf("unable to apply Alertmanager config for user %v: %v", cfg.User, err) + } + } + am.cfgs[cfg.User] = cfg + return nil +} + +func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amconfig.Config) (*Alertmanager, error) { + reg := prometheus.NewRegistry() + newAM, err := New(&Config{ + UserID: userID, + DataDir: am.cfg.DataDir, + Logger: util.Logger, + Peer: am.peer, + PeerTimeout: am.cfg.PeerTimeout, + Retention: am.cfg.Retention, + ExternalURL: am.cfg.ExternalURL.URL, + }, reg) + if err != nil { + return nil, fmt.Errorf("unable to start Alertmanager for user %v: %v", userID, err) + } + + if err := newAM.ApplyConfig(userID, amConfig); err != nil { + return nil, fmt.Errorf("unable to apply initial config for user %v: %v", userID, err) + } + + am.alertmanagerMetrics.addUserRegistry(userID, reg) + return newAM, nil +} + +// ServeHTTP serves the Alertmanager's web UI and API. +func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Request) { + userID, _, err := user.ExtractOrgIDFromHTTPRequest(req) + if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + am.alertmanagersMtx.Lock() + userAM, ok := am.alertmanagers[userID] + am.alertmanagersMtx.Unlock() + + if !ok || !userAM.IsActive() { + http.Error(w, fmt.Sprintf("no Alertmanager for this user ID"), http.StatusNotFound) + return + } + userAM.mux.ServeHTTP(w, req) +} + +// GetStatusHandler returns the status handler for this multi-tenant +// alertmanager. +func (am *MultitenantAlertmanager) GetStatusHandler() StatusHandler { + return StatusHandler{ + am: am, + } +} + +// StatusHandler shows the status of the alertmanager. +type StatusHandler struct { + am *MultitenantAlertmanager +} + +// ServeHTTP serves the status of the alertmanager. 
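+// It renders statusTemplate with the current gossip peer information.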
+func (s StatusHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + err := statusTemplate.Execute(w, s.am.peer.Info()) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go new file mode 100644 index 000000000000..7a94b6761142 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go @@ -0,0 +1,47 @@ +package alertmanager + +import ( + "context" + "flag" + "fmt" + + "github.com/cortexproject/cortex/pkg/alertmanager/alerts" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts/configdb" + "github.com/cortexproject/cortex/pkg/alertmanager/alerts/local" + "github.com/cortexproject/cortex/pkg/configs/client" +) + +// AlertStore stores and configures users rule configs +type AlertStore interface { + ListAlertConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) +} + +// AlertStoreConfig configures the alertmanager backend +type AlertStoreConfig struct { + Type string `yaml:"type"` + ConfigDB client.Config `yaml:"configdb"` + Local local.StoreConfig `yaml:"local"` +} + +// RegisterFlags registers flags. +func (cfg *AlertStoreConfig) RegisterFlags(f *flag.FlagSet) { + cfg.Local.RegisterFlags(f) + cfg.ConfigDB.RegisterFlagsWithPrefix("alertmanager.", f) + f.StringVar(&cfg.Type, "alertmanager.storage.type", "configdb", "Type of backend to use to store alertmanager configs. Supported values are: \"configdb\", \"local\".") +} + +// NewAlertStore returns a new rule storage backend poller and store +func NewAlertStore(cfg AlertStoreConfig) (AlertStore, error) { + switch cfg.Type { + case "configdb": + c, err := client.New(cfg.ConfigDB) + if err != nil { + return nil, err + } + return configdb.NewStore(c), nil + case "local": + return local.NewStore(cfg.Local) + default: + return nil, fmt.Errorf("unrecognized alertmanager storage backend %v, choose one of: \"configdb\", \"local\"", cfg.Type) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go new file mode 100644 index 000000000000..ad392849d28a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go @@ -0,0 +1,341 @@ +package api + +import ( + "errors" + "flag" + "net/http" + "regexp" + "strings" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/route" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + v1 "github.com/prometheus/prometheus/web/api/v1" + "github.com/weaveworks/common/middleware" + "github.com/weaveworks/common/server" + + "github.com/gorilla/mux" + + "github.com/cortexproject/cortex/pkg/alertmanager" + "github.com/cortexproject/cortex/pkg/chunk/purger" + "github.com/cortexproject/cortex/pkg/compactor" + "github.com/cortexproject/cortex/pkg/distributor" + "github.com/cortexproject/cortex/pkg/ingester" + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/querier" + "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/storegateway" + "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" + "github.com/cortexproject/cortex/pkg/util/push" +) + +type Config struct { + AlertmanagerHTTPPrefix 
string `yaml:"alertmanager_http_prefix"` + PrometheusHTTPPrefix string `yaml:"prometheus_http_prefix"` + + // The following configs are injected by the upstream caller. + ServerPrefix string `yaml:"-"` + LegacyHTTPPrefix string `yaml:"-"` + HTTPAuthMiddleware middleware.Func `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("", f) +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet with the set prefix. +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.AlertmanagerHTTPPrefix, prefix+"http.alertmanager-http-prefix", "/alertmanager", "HTTP URL path under which the Alertmanager ui and api will be served.") + f.StringVar(&cfg.PrometheusHTTPPrefix, prefix+"http.prometheus-http-prefix", "/prometheus", "HTTP URL path under which the Prometheus api will be served.") +} + +type API struct { + cfg Config + authMiddleware middleware.Func + server *server.Server + logger log.Logger +} + +func New(cfg Config, s *server.Server, logger log.Logger) (*API, error) { + // Ensure the encoded path is used. Required for the rules API + s.HTTP.UseEncodedPath() + + api := &API{ + cfg: cfg, + authMiddleware: cfg.HTTPAuthMiddleware, + server: s, + logger: logger, + } + + // If no authentication middleware is present in the config, use the default authentication middleware. + if cfg.HTTPAuthMiddleware == nil { + api.authMiddleware = middleware.AuthenticateUser + } + + return api, nil +} + +func (a *API) RegisterRoute(path string, handler http.Handler, auth bool, methods ...string) { + a.registerRouteWithRouter(a.server.HTTP, path, handler, auth, methods...) +} + +func (a *API) registerRouteWithRouter(router *mux.Router, path string, handler http.Handler, auth bool, methods ...string) { + level.Debug(a.logger).Log("msg", "api: registering route", "methods", strings.Join(methods, ","), "path", path, "auth", auth) + if auth { + handler = a.authMiddleware.Wrap(handler) + } + if len(methods) == 0 { + router.Path(path).Handler(handler) + return + } + router.Path(path).Methods(methods...).Handler(handler) +} + +func (a *API) RegisterRoutesWithPrefix(prefix string, handler http.Handler, auth bool, methods ...string) { + level.Debug(a.logger).Log("msg", "api: registering route", "methods", strings.Join(methods, ","), "prefix", prefix, "auth", auth) + if auth { + handler = a.authMiddleware.Wrap(handler) + } + if len(methods) == 0 { + a.server.HTTP.PathPrefix(prefix).Handler(handler) + return + } + a.server.HTTP.PathPrefix(prefix).Methods(methods...).Handler(handler) +} + +// Latest Prometheus requires r.RemoteAddr to be set to addr:port, otherwise it reject the request. +// Requests to Querier sometimes doesn't have that (if they are fetched from Query-Frontend). +// Prometheus uses this when logging queries to QueryLogger, but Cortex doesn't call engine.SetQueryLogger to set one. +// +// Can be removed when (if) https://github.com/prometheus/prometheus/pull/6840 is merged. +func fakeRemoteAddr(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RemoteAddr == "" { + r.RemoteAddr = "127.0.0.1:8888" + } + handler.ServeHTTP(w, r) + }) +} + +// RegisterAlertmanager registers endpoints associated with the alertmanager. It will only +// serve endpoints using the legacy http-prefix if it is not run as a single binary. 
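+// With the default configuration this means the Alertmanager UI and API are
+// exposed under /alertmanager and the status page under
+// /multitenant_alertmanager/status.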
+func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target bool) {
+ // Ensure this route is registered before the prefixed AM route
+ a.RegisterRoute("/multitenant_alertmanager/status", am.GetStatusHandler(), false)
+
+ // UI components lead to a large number of routes to support, so register a path prefix instead
+ a.RegisterRoutesWithPrefix(a.cfg.AlertmanagerHTTPPrefix, am, true)
+ level.Debug(a.logger).Log("msg", "api: registering alertmanager", "path_prefix", a.cfg.AlertmanagerHTTPPrefix)
+
+ // If the target is Alertmanager, enable the legacy behaviour. Otherwise only enable
+ // the component-routed API.
+ if target {
+ a.RegisterRoute("/status", am.GetStatusHandler(), false)
+ a.RegisterRoutesWithPrefix(a.cfg.LegacyHTTPPrefix, am, true)
+ }
+}
+
+// RegisterAPI registers the standard endpoints associated with a running Cortex.
+func (a *API) RegisterAPI(cfg interface{}) {
+ a.RegisterRoute("/config", configHandler(cfg), false)
+ a.RegisterRoute("/", http.HandlerFunc(indexHandler), false)
+}
+
+// RegisterDistributor registers the endpoints associated with the distributor.
+func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config) {
+ a.RegisterRoute("/api/v1/push", push.Handler(pushConfig, d.Push), true)
+ a.RegisterRoute("/distributor/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false)
+ a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false)
+
+ // Legacy Routes
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/push", push.Handler(pushConfig, d.Push), true)
+ a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false)
+ a.RegisterRoute("/ha-tracker", d.HATracker, false)
+}
+
+// RegisterIngester registers the ingester's HTTP and GRPC services.
+func (a *API) RegisterIngester(i *ingester.Ingester, pushConfig distributor.Config) {
+ client.RegisterIngesterServer(a.server.GRPC, i)
+
+ a.RegisterRoute("/ingester/flush", http.HandlerFunc(i.FlushHandler), false)
+ a.RegisterRoute("/ingester/shutdown", http.HandlerFunc(i.ShutdownHandler), false)
+ a.RegisterRoute("/ingester/push", push.Handler(pushConfig, i.Push), true) // For testing and debugging.
+
+ // Legacy Routes
+ a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false)
+ a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false)
+ a.RegisterRoute("/push", push.Handler(pushConfig, i.Push), true) // For testing and debugging.
+}
+
+// RegisterPurger registers the endpoints associated with the Purger/DeleteStore. They do not exactly
+// match the Prometheus API but mirror it closely enough to justify their routing under the Prometheus
+// component.
+func (a *API) RegisterPurger(store *purger.DeleteStore) {
+ deleteRequestHandler := purger.NewDeleteRequestHandler(store)
+
+ a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/admin/tsdb/delete_series", http.HandlerFunc(deleteRequestHandler.AddDeleteRequestHandler), true, "PUT", "POST")
+ a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/admin/tsdb/delete_series", http.HandlerFunc(deleteRequestHandler.GetAllDeleteRequestsHandler), true, "GET")
+
+ // Legacy Routes
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/admin/tsdb/delete_series", http.HandlerFunc(deleteRequestHandler.AddDeleteRequestHandler), true, "PUT", "POST")
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/admin/tsdb/delete_series", http.HandlerFunc(deleteRequestHandler.GetAllDeleteRequestsHandler), true, "GET")
+}
+
+// RegisterRuler registers routes associated with the Ruler service. If the
+// API is not enabled only the ring route is registered.
+func (a *API) RegisterRuler(r *ruler.Ruler, apiEnabled bool) {
+ a.RegisterRoute("/ruler/ring", r, false)
+
+ // Legacy Ring Route
+ a.RegisterRoute("/ruler_ring", r, false)
+
+ if apiEnabled {
+ // Prometheus Rule API Routes
+ a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/rules", http.HandlerFunc(r.PrometheusRules), true, "GET")
+ a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/alerts", http.HandlerFunc(r.PrometheusAlerts), true, "GET")
+
+ ruler.RegisterRulerServer(a.server.GRPC, r)
+
+ // Ruler API Routes
+ a.RegisterRoute("/api/v1/rules", http.HandlerFunc(r.ListRules), true, "GET")
+ a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.ListRules), true, "GET")
+ a.RegisterRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.GetRuleGroup), true, "GET")
+ a.RegisterRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.CreateRuleGroup), true, "POST")
+ a.RegisterRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.DeleteRuleGroup), true, "DELETE")
+
+ // Legacy Prometheus Rule API Routes
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/rules", http.HandlerFunc(r.PrometheusRules), true, "GET")
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/alerts", http.HandlerFunc(r.PrometheusAlerts), true, "GET")
+
+ // Legacy Ruler API Routes
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules", http.HandlerFunc(r.ListRules), true, "GET")
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}", http.HandlerFunc(r.ListRules), true, "GET")
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}/{groupName}", http.HandlerFunc(r.GetRuleGroup), true, "GET")
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}", http.HandlerFunc(r.CreateRuleGroup), true, "POST")
+ a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/rules/{namespace}/{groupName}", http.HandlerFunc(r.DeleteRuleGroup), true, "DELETE")
+ }
+}
+
+// RegisterRing registers the ring UI page associated with the distributor for writes.
+func (a *API) RegisterRing(r *ring.Ring) {
+ a.RegisterRoute("/ingester/ring", r, false)
+
+ // Legacy Route
+ a.RegisterRoute("/ring", r, false)
+}
+
+// RegisterStoreGateway registers the ring UI page associated with the store-gateway.
+func (a *API) RegisterStoreGateway(s *storegateway.StoreGateway) {
+ storegatewaypb.RegisterStoreGatewayServer(a.server.GRPC, s)
+
+ a.RegisterRoute("/store-gateway/ring", http.HandlerFunc(s.RingHandler), false)
+}
+
+// RegisterCompactor registers the ring UI page associated with the compactor.
+func (a *API) RegisterCompactor(c *compactor.Compactor) {
+ a.RegisterRoute("/compactor/ring", http.HandlerFunc(c.RingHandler), false)
+}
+
+// RegisterQuerier registers the Prometheus routes supported by the
+// Cortex querier service. Currently this cannot be registered simultaneously
+// with the QueryFrontend.
+func (a *API) RegisterQuerier(queryable storage.Queryable, engine *promql.Engine, distributor *distributor.Distributor, registerRoutesExternally bool) http.Handler {
+ api := v1.NewAPI(
+ engine,
+ queryable,
+ querier.DummyTargetRetriever{},
+ querier.DummyAlertmanagerRetriever{},
+ func() config.Config { return config.Config{} },
+ map[string]string{}, // TODO: include configuration flags
+ func(f http.HandlerFunc) http.HandlerFunc { return f },
+ func() v1.TSDBAdmin { return nil }, // Only needed for admin APIs.
+ false, // Disable admin APIs.
+ a.logger,
+ querier.DummyRulesRetriever{},
+ 0, 0, 0, // Remote read samples and concurrency limit.
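+ // The catch-all regexp below is presumably the CORS origin matcher;
+ // Cortex relies on its own auth middleware rather than restricting origins here.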
+ regexp.MustCompile(".*"), + func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, + &v1.PrometheusVersion{}, + ) + + // these routes are always registered to the default server + a.RegisterRoute("/api/v1/user_stats", http.HandlerFunc(distributor.UserStatsHandler), true) + a.RegisterRoute("/api/v1/chunks", querier.ChunksHandler(queryable), true) + + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/user_stats", http.HandlerFunc(distributor.UserStatsHandler), true) + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/chunks", querier.ChunksHandler(queryable), true) + + // these routes are either registered the default server OR to an internal mux. The internal mux is + // for use in a single binary mode when both the query frontend and the querier would attempt to claim these routes + // TODO: Add support to expose querier paths with a configurable prefix in single binary mode. + router := mux.NewRouter() + if registerRoutesExternally { + router = a.server.HTTP + } + + promRouter := route.New().WithPrefix(a.cfg.ServerPrefix + a.cfg.PrometheusHTTPPrefix + "/api/v1") + api.Register(promRouter) + promHandler := fakeRemoteAddr(promRouter) + + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/read", querier.RemoteReadHandler(queryable), true, "GET") + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/query", promHandler, true, "GET", "POST") + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/query_range", promHandler, true, "GET", "POST") + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/labels", promHandler, true, "GET", "POST") + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/label/{name}/values", promHandler, true, "GET") + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/series", promHandler, true, "GET", "POST", "DELETE") + a.registerRouteWithRouter(router, a.cfg.PrometheusHTTPPrefix+"/api/v1/metadata", promHandler, true, "GET") + + legacyPromRouter := route.New().WithPrefix(a.cfg.ServerPrefix + a.cfg.LegacyHTTPPrefix + "/api/v1") + api.Register(legacyPromRouter) + legacyPromHandler := fakeRemoteAddr(legacyPromRouter) + + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/read", querier.RemoteReadHandler(queryable), true, "GET") + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/query", legacyPromHandler, true, "GET", "POST") + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/query_range", legacyPromHandler, true, "GET", "POST") + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/labels", legacyPromHandler, true, "GET", "POST") + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/label/{name}/values", legacyPromHandler, true, "GET") + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/series", legacyPromHandler, true, "GET", "POST", "DELETE") + a.registerRouteWithRouter(router, a.cfg.LegacyHTTPPrefix+"/api/v1/metadata", legacyPromHandler, true, "GET") + + return router +} + +// RegisterQueryFrontend registers the Prometheus routes supported by the +// Cortex querier service. Currently this can not be registered simultaneously +// with the Querier. +func (a *API) RegisterQueryFrontend(f *frontend.Frontend) { + frontend.RegisterFrontendServer(a.server.GRPC, f) + + // Previously the frontend handled all calls to the provided prefix. 
Instead explicit + // routing is used since it will be required to enable the frontend to be run as part + // of a single binary in the future. + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/read", f.Handler(), true, "GET") + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/query", f.Handler(), true, "GET", "POST") + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/query_range", f.Handler(), true, "GET", "POST") + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/labels", f.Handler(), true, "GET", "POST") + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/label/{name}/values", f.Handler(), true, "GET") + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/series", f.Handler(), true, "GET", "POST", "DELETE") + a.RegisterRoute(a.cfg.PrometheusHTTPPrefix+"/api/v1/metadata", f.Handler(), true, "GET") + + // Register Legacy Routers + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/read", f.Handler(), true, "GET") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/query", f.Handler(), true, "GET", "POST") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/query_range", f.Handler(), true, "GET", "POST") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/labels", f.Handler(), true, "GET", "POST") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/label/{name}/values", f.Handler(), true, "GET") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/series", f.Handler(), true, "GET", "POST", "DELETE") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/api/v1/metadata", f.Handler(), true, "GET") +} + +// RegisterServiceMapHandler registers the Cortex structs service handler +// TODO: Refactor this code to be accomplished using the services.ServiceManager +// or a future module manager #2291 +func (a *API) RegisterServiceMapHandler(handler http.Handler) { + a.RegisterRoute("/services", handler, false) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go new file mode 100644 index 000000000000..ffbcad8e6b47 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go @@ -0,0 +1,64 @@ +package api + +import ( + "net/http" + + "github.com/go-kit/kit/log/level" + "gopkg.in/yaml.v2" + + "github.com/cortexproject/cortex/pkg/util" +) + +// TODO: Update this content to be a template that is dynamic based on how Cortex is run. +const indexPageContent = ` + + + + + Cortex + + +

+ <h1>Cortex</h1>
+
+ <p>Admin Endpoints:</p>
+ <ul><!-- admin endpoint links not recoverable from this excerpt --></ul>
+
+ <p>Dangerous:</p>
+ <ul><!-- dangerous endpoint links not recoverable from this excerpt --></ul>

+ + +` + +func indexHandler(w http.ResponseWriter, _ *http.Request) { + if _, err := w.Write([]byte(indexPageContent)); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +func configHandler(cfg interface{}) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + out, err := yaml.Marshal(cfg) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "text/yaml") + w.WriteHeader(http.StatusOK) + if _, err := w.Write(out); err != nil { + level.Error(util.Logger).Log("msg", "error writing response", "err", err) + } + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go new file mode 100644 index 000000000000..8651d6e99baa --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go @@ -0,0 +1,429 @@ +package compactor + +import ( + "context" + "flag" + "fmt" + "hash/fnv" + "path" + "strings" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/compact" + "github.com/thanos-io/thanos/pkg/compact/downsample" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/ring" + cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +// Config holds the Compactor config. +type Config struct { + BlockRanges cortex_tsdb.DurationList `yaml:"block_ranges"` + BlockSyncConcurrency int `yaml:"block_sync_concurrency"` + MetaSyncConcurrency int `yaml:"meta_sync_concurrency"` + ConsistencyDelay time.Duration `yaml:"consistency_delay"` + DataDir string `yaml:"data_dir"` + CompactionInterval time.Duration `yaml:"compaction_interval"` + CompactionRetries int `yaml:"compaction_retries"` + DeletionDelay time.Duration `yaml:"deletion_delay"` + + // Compactors sharding. + ShardingEnabled bool `yaml:"sharding_enabled"` + ShardingRing RingConfig `yaml:"sharding_ring"` + + // No need to add options to customize the retry backoff, + // given the defaults should be fine, but allow to override + // it in tests. + retryMinBackoff time.Duration `yaml:"-"` + retryMaxBackoff time.Duration `yaml:"-"` +} + +// RegisterFlags registers the Compactor flags. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.ShardingRing.RegisterFlags(f) + + cfg.BlockRanges = cortex_tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour} + cfg.retryMinBackoff = 10 * time.Second + cfg.retryMaxBackoff = time.Minute + + f.Var(&cfg.BlockRanges, "compactor.block-ranges", "List of compaction time ranges.") + f.DurationVar(&cfg.ConsistencyDelay, "compactor.consistency-delay", 30*time.Minute, fmt.Sprintf("Minimum age of fresh (non-compacted) blocks before they are being processed. 
Malformed blocks older than the maximum of consistency-delay and %s will be removed.", compact.PartialUploadThresholdAge)) + f.IntVar(&cfg.BlockSyncConcurrency, "compactor.block-sync-concurrency", 20, "Number of Go routines to use when syncing block index and chunks files from the long term storage.") + f.IntVar(&cfg.MetaSyncConcurrency, "compactor.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from the long term storage.") + f.StringVar(&cfg.DataDir, "compactor.data-dir", "./data", "Data directory in which to cache blocks and process compactions") + f.DurationVar(&cfg.CompactionInterval, "compactor.compaction-interval", time.Hour, "The frequency at which the compaction runs") + f.IntVar(&cfg.CompactionRetries, "compactor.compaction-retries", 3, "How many times to retry a failed compaction during a single compaction interval") + f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.") + f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from bucket. "+ + "If not 0, blocks will be marked for deletion and compactor component will delete blocks marked for deletion from the bucket. "+ + "If delete-delay is 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures, "+ + "if store gateway still has the block loaded, or compactor is ignoring the deletion because it's compacting the block at the same time.") +} + +// Compactor is a multi-tenant TSDB blocks compactor based on Thanos. +type Compactor struct { + services.Service + + compactorCfg Config + storageCfg cortex_tsdb.Config + logger log.Logger + + // function that creates bucket client and TSDB compactor using the context. + // Useful for injecting mock objects from tests. + createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) + + // Underlying compactor used to compact TSDB blocks. + tsdbCompactor tsdb.Compactor + + // Client used to run operations on the bucket storing blocks. + bucketClient objstore.Bucket + + // Ring used for sharding compactions. + ringLifecycler *ring.Lifecycler + ring *ring.Ring + + // Subservices manager (ring, lifecycler) + subservices *services.Manager + subservicesWatcher *services.FailureWatcher + + // Metrics. + compactionRunsStarted prometheus.Counter + compactionRunsCompleted prometheus.Counter + compactionRunsFailed prometheus.Counter + + blocksCleaned prometheus.Counter + blockCleanupFailures prometheus.Counter + blocksMarkedForDeletion prometheus.Counter + + // TSDB syncer metrics + syncerMetrics *syncerMetrics +} + +// NewCompactor makes a new Compactor. 
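+//
+// A minimal usage sketch (the surrounding setup is assumed, not part of this change):
+//
+//	c, err := NewCompactor(compactorCfg, storageCfg, util.Logger, prometheus.DefaultRegisterer)
+//	if err != nil {
+//		return err
+//	}
+//	// The Compactor is a services.Service and must be started before it compacts.
+//	return services.StartAndAwaitRunning(context.Background(), c)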
+func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.Config, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { + createBucketClientAndTsdbCompactor := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) { + bucketClient, err := cortex_tsdb.NewBucketClient(ctx, storageCfg, "compactor", logger) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create the bucket client") + } + + if registerer != nil { + bucketClient = objstore.BucketWithMetrics( /* bucket label value */ "", bucketClient, prometheus.WrapRegistererWithPrefix("cortex_compactor_", registerer)) + } + + compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) + return bucketClient, compactor, err + } + + cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createBucketClientAndTsdbCompactor) + if err != nil { + return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") + } + + return cortexCompactor, nil +} + +func newCompactor( + compactorCfg Config, + storageCfg cortex_tsdb.Config, + logger log.Logger, + registerer prometheus.Registerer, + createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error), +) (*Compactor, error) { + c := &Compactor{ + compactorCfg: compactorCfg, + storageCfg: storageCfg, + logger: logger, + syncerMetrics: newSyncerMetrics(registerer), + createBucketClientAndTsdbCompactor: createBucketClientAndTsdbCompactor, + + compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_runs_started_total", + Help: "Total number of compaction runs started.", + }), + compactionRunsCompleted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_runs_completed_total", + Help: "Total number of compaction runs successfully completed.", + }), + compactionRunsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_runs_failed_total", + Help: "Total number of compaction runs failed.", + }), + + blocksCleaned: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_blocks_cleaned_total", + Help: "Total number of blocks deleted in compactor.", + }), + blockCleanupFailures: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_block_cleanup_failures_total", + Help: "Failures encountered while deleting blocks in compactor.", + }), + blocksMarkedForDeletion: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_blocks_marked_for_deletion_total", + Help: "Total number of blocks marked for deletion in compactor.", + }), + } + + c.Service = services.NewBasicService(c.starting, c.running, c.stopping) + + return c, nil +} + +// Start the compactor. +func (c *Compactor) starting(ctx context.Context) error { + // Initialize the compactors ring if sharding is enabled. 
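+ // The lifecycler registers this instance in the ring, while the ring
+ // client is what ownUser later consults to decide tenant ownership.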
+ if c.compactorCfg.ShardingEnabled { + lifecyclerCfg := c.compactorCfg.ShardingRing.ToLifecyclerConfig() + lifecycler, err := ring.NewLifecycler(lifecyclerCfg, ring.NewNoopFlushTransferer(), "compactor", ring.CompactorRingKey, false) + if err != nil { + return errors.Wrap(err, "unable to initialize compactor ring lifecycler") + } + + c.ringLifecycler = lifecycler + + ring, err := ring.New(lifecyclerCfg.RingConfig, "compactor", ring.CompactorRingKey) + if err != nil { + return errors.Wrap(err, "unable to initialize compactor ring") + } + + c.ring = ring + + c.subservices, err = services.NewManager(c.ringLifecycler, c.ring) + if err == nil { + c.subservicesWatcher = services.NewFailureWatcher() + c.subservicesWatcher.WatchManager(c.subservices) + + err = services.StartManagerAndAwaitHealthy(ctx, c.subservices) + } + + if err != nil { + return errors.Wrap(err, "unable to start compactor dependencies") + } + } + + var err error + c.bucketClient, c.tsdbCompactor, err = c.createBucketClientAndTsdbCompactor(ctx) + if err != nil && c.subservices != nil { + c.subservices.StopAsync() + } + + return errors.Wrap(err, "failed to initialize compactor objects") +} + +func (c *Compactor) stopping(_ error) error { + if c.subservices != nil { + return services.StopManagerAndAwaitStopped(context.Background(), c.subservices) + } + return nil +} + +func (c *Compactor) running(ctx context.Context) error { + // If sharding is enabled we should wait until this instance is + // ACTIVE within the ring. + if c.compactorCfg.ShardingEnabled { + level.Info(c.logger).Log("msg", "waiting until compactor is ACTIVE in the ring") + if err := ring.WaitInstanceState(ctx, c.ring, c.ringLifecycler.ID, ring.ACTIVE); err != nil { + return err + } + level.Info(c.logger).Log("msg", "compactor is ACTIVE in the ring") + } + + // Run an initial compaction before starting the interval. + c.compactUsersWithRetries(ctx) + + ticker := time.NewTicker(c.compactorCfg.CompactionInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + c.compactUsersWithRetries(ctx) + case <-ctx.Done(): + return nil + case err := <-c.subservicesWatcher.Chan(): + return errors.Wrap(err, "compactor subservice failed") + } + } +} + +func (c *Compactor) compactUsersWithRetries(ctx context.Context) { + retries := util.NewBackoff(ctx, util.BackoffConfig{ + MinBackoff: c.compactorCfg.retryMinBackoff, + MaxBackoff: c.compactorCfg.retryMaxBackoff, + MaxRetries: c.compactorCfg.CompactionRetries, + }) + + c.compactionRunsStarted.Inc() + + for retries.Ongoing() { + if success := c.compactUsers(ctx); success { + c.compactionRunsCompleted.Inc() + return + } + + retries.Wait() + } + + c.compactionRunsFailed.Inc() +} + +func (c *Compactor) compactUsers(ctx context.Context) bool { + level.Info(c.logger).Log("msg", "discovering users from bucket") + users, err := c.discoverUsers(ctx) + if err != nil { + level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err) + return false + } + level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(users)) + + for _, userID := range users { + // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). + if ctx.Err() != nil { + level.Info(c.logger).Log("msg", "interrupting compaction of user blocks", "err", err) + return false + } + + // If sharding is enabled, ensure the user ID belongs to our shard. 
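+ // Ownership is decided by hashing the user ID onto the ring (see ownUser
+ // below); with a replication factor of 1 each tenant has exactly one owner.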
+ if c.compactorCfg.ShardingEnabled { + if owned, err := c.ownUser(userID); err != nil { + level.Warn(c.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err) + continue + } else if !owned { + level.Debug(c.logger).Log("msg", "skipping user because not owned by this shard", "user", userID) + continue + } + } + + level.Info(c.logger).Log("msg", "starting compaction of user blocks", "user", userID) + + if err = c.compactUser(ctx, userID); err != nil { + level.Error(c.logger).Log("msg", "failed to compact user blocks", "user", userID, "err", err) + continue + } + + level.Info(c.logger).Log("msg", "successfully compacted user blocks", "user", userID) + } + + return true +} + +func (c *Compactor) compactUser(ctx context.Context, userID string) error { + bucket := cortex_tsdb.NewUserBucketClient(userID, c.bucketClient) + + reg := prometheus.NewRegistry() + defer c.syncerMetrics.gatherThanosSyncerMetrics(reg) + + ulogger := util.WithUserID(userID, c.logger) + + // Filters out duplicate blocks that can be formed from two or more overlapping + // blocks that fully submatches the source blocks of the older blocks. + deduplicateBlocksFilter := block.NewDeduplicateFilter() + + // While fetching blocks, we filter out blocks that were marked for deletion by using IgnoreDeletionMarkFilter. + // The delay of deleteDelay/2 is added to ensure we fetch blocks that are meant to be deleted but do not have a replacement yet. + ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(ulogger, bucket, time.Duration(c.compactorCfg.DeletionDelay.Seconds()/2)*time.Second) + + fetcher, err := block.NewMetaFetcher( + ulogger, + c.compactorCfg.MetaSyncConcurrency, + bucket, + // The fetcher stores cached metas in the "meta-syncer/" sub directory, + // but we prefix it with "meta-" in order to guarantee no clashing with + // the directory used by the Thanos Syncer, whatever is the user ID. + path.Join(c.compactorCfg.DataDir, "meta-"+userID), + reg, + []block.MetadataFilter{ + // List of filters to apply (order matters). + block.NewConsistencyDelayMetaFilter(ulogger, c.compactorCfg.ConsistencyDelay, reg), + ignoreDeletionMarkFilter, + deduplicateBlocksFilter, + }, + nil, + ) + if err != nil { + return err + } + + syncer, err := compact.NewSyncer( + ulogger, + reg, + bucket, + fetcher, + deduplicateBlocksFilter, + ignoreDeletionMarkFilter, + c.blocksMarkedForDeletion, + c.compactorCfg.BlockSyncConcurrency, + false, // Do not accept malformed indexes + true, // Enable vertical compaction + ) + if err != nil { + return errors.Wrap(err, "failed to create syncer") + } + + compactor, err := compact.NewBucketCompactor( + ulogger, + syncer, + c.tsdbCompactor, + path.Join(c.compactorCfg.DataDir, "compact"), + bucket, + // No compaction concurrency. Due to how Cortex works we don't + // expect to have multiple block groups per tenant, so setting + // a value higher than 1 would be useless. 
+ 1, + ) + if err != nil { + return errors.Wrap(err, "failed to create bucket compactor") + } + + if err := compactor.Compact(ctx); err != nil { + return errors.Wrap(err, "compaction") + } + + blocksCleaner := compact.NewBlocksCleaner(ulogger, bucket, ignoreDeletionMarkFilter, c.compactorCfg.DeletionDelay, c.blocksCleaned, c.blockCleanupFailures) + + if err := blocksCleaner.DeleteMarkedBlocks(ctx); err != nil { + return errors.Wrap(err, "error cleaning blocks") + } + + return nil +} + +func (c *Compactor) discoverUsers(ctx context.Context) ([]string, error) { + var users []string + + err := c.bucketClient.Iter(ctx, "", func(entry string) error { + users = append(users, strings.TrimSuffix(entry, "/")) + return nil + }) + + return users, err +} + +func (c *Compactor) ownUser(userID string) (bool, error) { + // Hash the user ID. + hasher := fnv.New32a() + _, _ = hasher.Write([]byte(userID)) + userHash := hasher.Sum32() + + // Check whether this compactor instance owns the user. + rs, err := c.ring.Get(userHash, ring.Read, []ring.IngesterDesc{}) + if err != nil { + return false, err + } + + if len(rs.Ingesters) != 1 { + return false, fmt.Errorf("unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Ingesters)) + } + + return rs.Ingesters[0].Addr == c.ringLifecycler.Addr, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go new file mode 100644 index 000000000000..2c0608ea056b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go @@ -0,0 +1,53 @@ +package compactor + +import ( + "html/template" + "net/http" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + compactorStatusPageTemplate = template.Must(template.New("main").Parse(` + + + + + Cortex Compactor Ring + + +

+ <h1>Cortex Compactor Ring</h1>
+ <p>{{ .Message }}</p>

+ + `)) +) + +func writeMessage(w http.ResponseWriter, message string) { + w.WriteHeader(http.StatusOK) + err := compactorStatusPageTemplate.Execute(w, struct { + Message string + }{Message: message}) + + if err != nil { + level.Error(util.Logger).Log("msg", "unable to serve compactor ring page", "err", err) + } +} + +func (c *Compactor) RingHandler(w http.ResponseWriter, req *http.Request) { + if !c.compactorCfg.ShardingEnabled { + writeMessage(w, "Compactor has no ring because sharding is disabled.") + return + } + + if c.State() != services.Running { + // we cannot read the ring before Compactor is in Running state, + // because that would lead to race condition. + writeMessage(w, "Compactor is not running yet.") + return + } + + c.ring.ServeHTTP(w, req) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go new file mode 100644 index 000000000000..6caa4bd501e7 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go @@ -0,0 +1,91 @@ +package compactor + +import ( + "flag" + "os" + "time" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +// RingConfig masks the ring lifecycler config which contains +// many options not really required by the compactors ring. This config +// is used to strip down the config to the minimum, and avoid confusion +// to the user. +type RingConfig struct { + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + + // Instance details + InstanceID string `yaml:"instance_id" doc:"hidden"` + InstanceInterfaceNames []string `yaml:"instance_interface_names" doc:"hidden"` + InstancePort int `yaml:"instance_port" doc:"hidden"` + InstanceAddr string `yaml:"instance_addr" doc:"hidden"` + + // Injected internally + ListenPort int `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { + hostname, err := os.Hostname() + if err != nil { + level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + os.Exit(1) + } + + // Ring flags + cfg.KVStore.RegisterFlagsWithPrefix("compactor.ring.", "collectors/", f) + f.DurationVar(&cfg.HeartbeatPeriod, "compactor.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "compactor.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which compactors are considered unhealthy within the ring.") + + // Instance flags + cfg.InstanceInterfaceNames = []string{"eth0", "en0"} + f.Var((*flagext.Strings)(&cfg.InstanceInterfaceNames), "compactor.ring.instance-interface", "Name of network interface to read address from.") + f.StringVar(&cfg.InstanceAddr, "compactor.ring.instance-addr", "", "IP address to advertise in the ring.") + f.IntVar(&cfg.InstancePort, "compactor.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") + f.StringVar(&cfg.InstanceID, "compactor.ring.instance-id", hostname, "Instance ID to register in the ring.") +} + +// ToLifecyclerConfig returns a LifecyclerConfig based on the compactor +// ring config. 
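+// The replication factor is pinned to 1 so that each tenant hashes to a
+// single compactor, which is what makes sharded compactions race-free.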
+func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { + // We have to make sure that the ring.LifecyclerConfig and ring.Config + // defaults are preserved + lc := ring.LifecyclerConfig{} + rc := ring.Config{} + + flagext.DefaultValues(&lc) + flagext.DefaultValues(&rc) + + // Configure ring + rc.KVStore = cfg.KVStore + rc.HeartbeatTimeout = cfg.HeartbeatTimeout + rc.ReplicationFactor = 1 + + // Configure lifecycler + lc.RingConfig = rc + lc.ListenPort = &cfg.ListenPort + lc.Addr = cfg.InstanceAddr + lc.Port = cfg.InstancePort + lc.ID = cfg.InstanceID + lc.InfNames = cfg.InstanceInterfaceNames + lc.SkipUnregister = false + lc.HeartbeatPeriod = cfg.HeartbeatPeriod + lc.ObservePeriod = 0 + lc.JoinAfter = 0 + lc.MinReadyDuration = 0 + lc.FinalSleep = 0 + + // We use a safe default instead of exposing to config option to the user + // in order to simplify the config. + lc.NumTokens = 512 + + return lc +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go new file mode 100644 index 000000000000..49ba8a5e4c80 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go @@ -0,0 +1,141 @@ +package compactor + +import ( + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/util" +) + +// Copied from Thanos, pkg/compact/compact.go. +// Here we aggregate metrics from all finished syncers. +type syncerMetrics struct { + metaSync prometheus.Counter + metaSyncFailures prometheus.Counter + metaSyncDuration *util.HistogramDataCollector // was prometheus.Histogram before + metaSyncConsistencyDelay prometheus.Gauge + garbageCollectedBlocks prometheus.Counter + garbageCollections prometheus.Counter + garbageCollectionFailures prometheus.Counter + garbageCollectionDuration *util.HistogramDataCollector // was prometheus.Histogram before + compactions prometheus.Counter + compactionRunsStarted prometheus.Counter + compactionRunsCompleted prometheus.Counter + compactionFailures prometheus.Counter + verticalCompactions prometheus.Counter +} + +// Copied (and modified with Cortex prefix) from Thanos, pkg/compact/compact.go +// We also ignore "group" label, since we only use a single group. 
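+// Each per-tenant compaction run gathers a throwaway registry and adds the
+// resulting per-run sums into these aggregate counters (see
+// gatherThanosSyncerMetrics below).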
+func newSyncerMetrics(reg prometheus.Registerer) *syncerMetrics { + var m syncerMetrics + + m.metaSync = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_meta_syncs_total", + Help: "TSDB Syncer: Total blocks metadata synchronization attempts.", + }) + m.metaSyncFailures = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_meta_sync_failures_total", + Help: "TSDB Syncer: Total blocks metadata synchronization failures.", + }) + m.metaSyncDuration = util.NewHistogramDataCollector(prometheus.NewDesc( + "cortex_compactor_meta_sync_duration_seconds", + "TSDB Syncer: Duration of the blocks metadata synchronization in seconds.", + nil, nil)) + m.metaSyncConsistencyDelay = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "cortex_compactor_meta_sync_consistency_delay_seconds", + Help: "TSDB Syncer: Configured consistency delay in seconds.", + }) + + m.garbageCollectedBlocks = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_garbage_collected_blocks_total", + Help: "TSDB Syncer: Total number of deleted blocks by compactor.", + }) + m.garbageCollections = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_garbage_collection_total", + Help: "TSDB Syncer: Total number of garbage collection operations.", + }) + m.garbageCollectionFailures = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_garbage_collection_failures_total", + Help: "TSDB Syncer: Total number of failed garbage collection operations.", + }) + m.garbageCollectionDuration = util.NewHistogramDataCollector(prometheus.NewDesc( + "cortex_compactor_garbage_collection_duration_seconds", + "TSDB Syncer: Time it took to perform garbage collection iteration.", + nil, nil)) + + m.compactions = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_group_compactions_total", + Help: "TSDB Syncer: Total number of group compaction attempts that resulted in a new block.", + }) + m.compactionRunsStarted = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_group_compaction_runs_started_total", + Help: "TSDB Syncer: Total number of group compaction attempts.", + }) + m.compactionRunsCompleted = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_group_compaction_runs_completed_total", + Help: "TSDB Syncer: Total number of group completed compaction runs. 
This also includes compactor group runs that resulted with no compaction.", + }) + m.compactionFailures = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_group_compactions_failures_total", + Help: "TSDB Syncer: Total number of failed group compactions.", + }) + m.verticalCompactions = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "cortex_compactor_group_vertical_compactions_total", + Help: "TSDB Syncer: Total number of group compaction attempts that resulted in a new block based on overlapping blocks.", + }) + + if reg != nil { + reg.MustRegister( + m.metaSync, + m.metaSyncFailures, + m.metaSyncDuration, + m.metaSyncConsistencyDelay, + m.garbageCollectedBlocks, + m.garbageCollections, + m.garbageCollectionFailures, + m.garbageCollectionDuration, + m.compactions, + m.compactionRunsStarted, + m.compactionRunsCompleted, + m.compactionFailures, + m.verticalCompactions, + ) + } + return &m +} + +func (m *syncerMetrics) gatherThanosSyncerMetrics(reg *prometheus.Registry) { + if m == nil { + return + } + + mf, err := reg.Gather() + if err != nil { + level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) + return + } + + mfm, err := util.NewMetricFamilyMap(mf) + if err != nil { + level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) + return + } + + m.metaSync.Add(mfm.SumCounters("blocks_meta_syncs_total")) + m.metaSyncFailures.Add(mfm.SumCounters("blocks_meta_sync_failures_total")) + m.metaSyncDuration.Add(mfm.SumHistograms("blocks_meta_sync_duration_seconds")) + m.metaSyncConsistencyDelay.Set(mfm.MaxGauges("consistency_delay_seconds")) + + m.garbageCollectedBlocks.Add(mfm.SumCounters("thanos_compact_garbage_collected_blocks_total")) + m.garbageCollections.Add(mfm.SumCounters("thanos_compact_garbage_collection_total")) + m.garbageCollectionFailures.Add(mfm.SumCounters("thanos_compact_garbage_collection_failures_total")) + m.garbageCollectionDuration.Add(mfm.SumHistograms("thanos_compact_garbage_collection_duration_seconds")) + + // These metrics have "group" label, but we sum them all together. 
+ m.compactions.Add(mfm.SumCounters("thanos_compact_group_compactions_total")) + m.compactionRunsStarted.Add(mfm.SumCounters("thanos_compact_group_compaction_runs_started_total")) + m.compactionRunsCompleted.Add(mfm.SumCounters("thanos_compact_group_compaction_runs_completed_total")) + m.compactionFailures.Add(mfm.SumCounters("thanos_compact_group_compactions_failures_total")) + m.verticalCompactions.Add(mfm.SumCounters("thanos_compact_group_vertical_compactions_total")) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go new file mode 100644 index 000000000000..5418ba87fe2e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go @@ -0,0 +1,365 @@ +package api + +import ( + "database/sql" + "encoding/json" + "errors" + "flag" + "fmt" + "html/template" + "io/ioutil" + "mime" + "net/http" + "strconv" + "strings" + + "gopkg.in/yaml.v2" + + "github.com/go-kit/kit/log/level" + "github.com/gorilla/mux" + amconfig "github.com/prometheus/alertmanager/config" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/configs/db" + "github.com/cortexproject/cortex/pkg/configs/userconfig" + "github.com/cortexproject/cortex/pkg/util" +) + +var ( + ErrEmailNotificationsAreDisabled = errors.New("email notifications are disabled") + ErrWebhookNotificationsAreDisabled = errors.New("webhook notifications are disabled") +) + +// Config configures Configs API +type Config struct { + Notifications NotificationsConfig `yaml:"notifications"` +} + +// NotificationsConfig configures Alertmanager notifications method. +type NotificationsConfig struct { + DisableEmail bool `yaml:"disable_email"` + DisableWebHook bool `yaml:"disable_webhook"` +} + +// RegisterFlags adds the flags required to configure this to the given FlagSet. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&cfg.Notifications.DisableEmail, "configs.notifications.disable-email", false, "Disable Email notifications for Alertmanager.") + f.BoolVar(&cfg.Notifications.DisableWebHook, "configs.notifications.disable-webhook", false, "Disable WebHook notifications for Alertmanager.") +} + +// API implements the configs api. +type API struct { + http.Handler + db db.DB + cfg Config +} + +// New creates a new API +func New(database db.DB, cfg Config) *API { + a := &API{ + db: database, + cfg: cfg, + } + r := mux.NewRouter() + a.RegisterRoutes(r) + a.Handler = r + return a +} + +func (a *API) admin(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "text/html") + fmt.Fprintf(w, ` + + + configs :: configuration service + +
+		<h1>configs :: configuration service</h1>
+ + +`) +} + +// RegisterRoutes registers the configs API HTTP routes with the provided Router. +func (a *API) RegisterRoutes(r *mux.Router) { + for _, route := range []struct { + name, method, path string + handler http.HandlerFunc + }{ + {"root", "GET", "/", a.admin}, + // Dedicated APIs for updating rules config. In the future, these *must* + // be used. + {"get_rules", "GET", "/api/prom/configs/rules", a.getConfig}, + {"set_rules", "POST", "/api/prom/configs/rules", a.setConfig}, + {"get_templates", "GET", "/api/prom/configs/templates", a.getConfig}, + {"set_templates", "POST", "/api/prom/configs/templates", a.setConfig}, + {"get_alertmanager_config", "GET", "/api/prom/configs/alertmanager", a.getConfig}, + {"set_alertmanager_config", "POST", "/api/prom/configs/alertmanager", a.setConfig}, + {"validate_alertmanager_config", "POST", "/api/prom/configs/alertmanager/validate", a.validateAlertmanagerConfig}, + {"deactivate_config", "DELETE", "/api/prom/configs/deactivate", a.deactivateConfig}, + {"restore_config", "POST", "/api/prom/configs/restore", a.restoreConfig}, + // Internal APIs. + {"private_get_rules", "GET", "/private/api/prom/configs/rules", a.getConfigs}, + {"private_get_alertmanager_config", "GET", "/private/api/prom/configs/alertmanager", a.getConfigs}, + } { + r.Handle(route.path, route.handler).Methods(route.method).Name(route.name) + } +} + +// getConfig returns the request configuration. +func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + logger := util.WithContext(r.Context(), util.Logger) + + cfg, err := a.db.GetConfig(r.Context(), userID) + if err == sql.ErrNoRows { + http.Error(w, "No configuration", http.StatusNotFound) + return + } else if err != nil { + // XXX: Untested + level.Error(logger).Log("msg", "error getting config", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + switch parseConfigFormat(r.Header.Get("Accept"), FormatJSON) { + case FormatJSON: + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(cfg) + case FormatYAML: + w.Header().Set("Content-Type", "application/yaml") + err = yaml.NewEncoder(w).Encode(cfg) + default: + // should never reach this point + level.Error(logger).Log("msg", "unexpected error detecting the config format") + http.Error(w, err.Error(), http.StatusInternalServerError) + } + if err != nil { + // XXX: Untested + level.Error(logger).Log("msg", "error encoding config", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + logger := util.WithContext(r.Context(), util.Logger) + + var cfg userconfig.Config + switch parseConfigFormat(r.Header.Get("Content-Type"), FormatJSON) { + case FormatJSON: + if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { + // XXX: Untested + level.Error(logger).Log("msg", "error decoding json body", "err", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + case FormatYAML: + if err := yaml.NewDecoder(r.Body).Decode(&cfg); err != nil { + // XXX: Untested + level.Error(logger).Log("msg", "error decoding yaml body", "err", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + default: + // should never 
reach this point + level.Error(logger).Log("msg", "unexpected error detecting the config format") + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := validateAlertmanagerConfig(cfg.AlertmanagerConfig, a.cfg.Notifications); err != nil && cfg.AlertmanagerConfig != "" { + level.Error(logger).Log("msg", "invalid Alertmanager config", "err", err) + http.Error(w, fmt.Sprintf("Invalid Alertmanager config: %v", err), http.StatusBadRequest) + return + } + if err := validateRulesFiles(cfg); err != nil { + level.Error(logger).Log("msg", "invalid rules", "err", err) + http.Error(w, fmt.Sprintf("Invalid rules: %v", err), http.StatusBadRequest) + return + } + if err := validateTemplateFiles(cfg); err != nil { + level.Error(logger).Log("msg", "invalid templates", "err", err) + http.Error(w, fmt.Sprintf("Invalid templates: %v", err), http.StatusBadRequest) + return + } + if err := a.db.SetConfig(r.Context(), userID, cfg); err != nil { + // XXX: Untested + level.Error(logger).Log("msg", "error storing config", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) +} + +func (a *API) validateAlertmanagerConfig(w http.ResponseWriter, r *http.Request) { + logger := util.WithContext(r.Context(), util.Logger) + cfg, err := ioutil.ReadAll(r.Body) + if err != nil { + level.Error(logger).Log("msg", "error reading request body", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err = validateAlertmanagerConfig(string(cfg), a.cfg.Notifications); err != nil { + w.WriteHeader(http.StatusBadRequest) + util.WriteJSONResponse(w, map[string]string{ + "status": "error", + "error": err.Error(), + }) + return + } + + util.WriteJSONResponse(w, map[string]string{ + "status": "success", + }) +} + +func validateAlertmanagerConfig(cfg string, noCfg NotificationsConfig) error { + amCfg, err := amconfig.Load(cfg) + if err != nil { + return err + } + + for _, recv := range amCfg.Receivers { + if noCfg.DisableEmail && len(recv.EmailConfigs) > 0 { + return ErrEmailNotificationsAreDisabled + } + if noCfg.DisableWebHook && len(recv.WebhookConfigs) > 0 { + return ErrWebhookNotificationsAreDisabled + } + } + + return nil +} + +func validateRulesFiles(c userconfig.Config) error { + _, err := c.RulesConfig.Parse() + return err +} + +func validateTemplateFiles(c userconfig.Config) error { + for fn, content := range c.TemplateFiles { + if _, err := template.New(fn).Parse(content); err != nil { + return err + } + } + + return nil +} + +// ConfigsView renders multiple configurations, mapping userID to userconfig.View. +// Exposed only for tests. 
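// Editor's sketch (not part of the vendored diff): exercising the routes
// registered above with plain net/http. The host is a placeholder; the JSON
// keys mirror the userconfig.Config encoding that the postgres store later in
// this diff queries ("rules_files", "rule_format_version"), and X-Scope-OrgID
// is assumed to be the header the weaveworks user package extracts the org ID
// from.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// setConfig picks the decoder from Content-Type via parseConfigFormat.
	body := strings.NewReader(`{"rule_format_version":"2","rules_files":{"example.rules":""}}`)
	req, err := http.NewRequest("POST", "http://configs.example/api/prom/configs/rules", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-Scope-OrgID", "user1") // becomes the userID
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // "204 No Content" once validation passes
}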
+type ConfigsView struct { + Configs map[string]userconfig.View `json:"configs"` +} + +func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) { + var cfgs map[string]userconfig.View + var cfgErr error + logger := util.WithContext(r.Context(), util.Logger) + rawSince := r.FormValue("since") + if rawSince == "" { + cfgs, cfgErr = a.db.GetAllConfigs(r.Context()) + } else { + since, err := strconv.ParseUint(rawSince, 10, 0) + if err != nil { + level.Info(logger).Log("msg", "invalid config ID", "err", err) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + cfgs, cfgErr = a.db.GetConfigs(r.Context(), userconfig.ID(since)) + } + + if cfgErr != nil { + // XXX: Untested + level.Error(logger).Log("msg", "error getting configs", "err", cfgErr) + http.Error(w, cfgErr.Error(), http.StatusInternalServerError) + return + } + + view := ConfigsView{Configs: cfgs} + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(view); err != nil { + // XXX: Untested + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + logger := util.WithContext(r.Context(), util.Logger) + + if err := a.db.DeactivateConfig(r.Context(), userID); err != nil { + if err == sql.ErrNoRows { + level.Info(logger).Log("msg", "deactivate config - no configuration", "userID", userID) + http.Error(w, "No configuration", http.StatusNotFound) + return + } + level.Error(logger).Log("msg", "error deactivating config", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + level.Info(logger).Log("msg", "config deactivated", "userID", userID) + w.WriteHeader(http.StatusOK) +} + +func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) { + userID, _, err := user.ExtractOrgIDFromHTTPRequest(r) + if err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + logger := util.WithContext(r.Context(), util.Logger) + + if err := a.db.RestoreConfig(r.Context(), userID); err != nil { + if err == sql.ErrNoRows { + level.Info(logger).Log("msg", "restore config - no configuration", "userID", userID) + http.Error(w, "No configuration", http.StatusNotFound) + return + } + level.Error(logger).Log("msg", "error restoring config", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + level.Info(logger).Log("msg", "config restored", "userID", userID) + w.WriteHeader(http.StatusOK) +} + +const ( + FormatInvalid = "invalid" + FormatJSON = "json" + FormatYAML = "yaml" +) + +func parseConfigFormat(v string, defaultFormat string) string { + if v == "" { + return defaultFormat + } + parts := strings.Split(v, ",") + for _, part := range parts { + mimeType, _, err := mime.ParseMediaType(part) + if err != nil { + continue + } + switch mimeType { + case "application/json": + return FormatJSON + case "text/yaml", "text/x-yaml", "application/yaml", "application/x-yaml": + return FormatYAML + } + } + return defaultFormat +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go new file mode 100644 index 000000000000..a020bb54f023 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go @@ -0,0 +1,154 @@ +package client + +import ( + "context" + "encoding/json" 
+ "flag" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/cortexproject/cortex/pkg/configs/userconfig" + + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/weaveworks/common/instrument" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +// Config says where we can find the ruler configs. +type Config struct { + ConfigsAPIURL flagext.URLValue `yaml:"configs_api_url"` + ClientTimeout time.Duration `yaml:"client_timeout"` // HTTP timeout duration for requests made to the Weave Cloud configs service. +} + +// RegisterFlagsWithPrefix adds the flags required to configure this to the given FlagSet. +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.Var(&cfg.ConfigsAPIURL, prefix+"configs.url", "URL of configs API server.") + f.DurationVar(&cfg.ClientTimeout, prefix+"configs.client-timeout", 5*time.Second, "Timeout for requests to Weave Cloud configs service.") +} + +var configsRequestDuration = instrument.NewHistogramCollector(promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "configs_request_duration_seconds", + Help: "Time spent requesting configs.", + Buckets: prometheus.DefBuckets, +}, []string{"operation", "status_code"})) + +// Client is what the ruler and alertmanager need from a config store to process rules. +type Client interface { + // GetRules returns all Cortex configurations from a configs API server + // that have been updated after the given userconfig.ID was last updated. + GetRules(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) + + // GetAlerts fetches all the alerts that have changed since `since`. + GetAlerts(ctx context.Context, since userconfig.ID) (*ConfigsResponse, error) +} + +// New creates a new ConfigClient. +func New(cfg Config) (*ConfigDBClient, error) { + return &ConfigDBClient{ + URL: cfg.ConfigsAPIURL.URL, + Timeout: cfg.ClientTimeout, + }, nil +} + +// ConfigDBClient allows retrieving recording and alerting rules from the configs server. +type ConfigDBClient struct { + URL *url.URL + Timeout time.Duration +} + +// GetRules implements Client. +func (c ConfigDBClient) GetRules(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) { + suffix := "" + if since != 0 { + suffix = fmt.Sprintf("?since=%d", since) + } + endpoint := fmt.Sprintf("%s/private/api/prom/configs/rules%s", c.URL.String(), suffix) + var response *ConfigsResponse + err := instrument.CollectedRequest(ctx, "GetRules", configsRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + response, err = doRequest(endpoint, c.Timeout, since) + return err + }) + if err != nil { + return nil, err + } + configs := map[string]userconfig.VersionedRulesConfig{} + for id, view := range response.Configs { + cfg := view.GetVersionedRulesConfig() + if cfg != nil { + configs[id] = *cfg + } + } + return configs, nil +} + +// GetAlerts implements Client.
+func (c ConfigDBClient) GetAlerts(ctx context.Context, since userconfig.ID) (*ConfigsResponse, error) { + suffix := "" + if since != 0 { + suffix = fmt.Sprintf("?since=%d", since) + } + endpoint := fmt.Sprintf("%s/private/api/prom/configs/alertmanager%s", c.URL.String(), suffix) + var response *ConfigsResponse + err := instrument.CollectedRequest(ctx, "GetAlerts", configsRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + response, err = doRequest(endpoint, c.Timeout, since) + return err + }) + return response, err +} + +func doRequest(endpoint string, timeout time.Duration, since userconfig.ID) (*ConfigsResponse, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + client := &http.Client{Timeout: timeout} + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("Invalid response from configs server: %v", resp.StatusCode) + } + + var config ConfigsResponse + if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { + level.Error(util.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) + return nil, err + } + + config.since = since + return &config, nil +} + +// ConfigsResponse is a response from the server for GetConfigs. +type ConfigsResponse struct { + // The version since which these configs were changed + since userconfig.ID + + // Configs maps user ID to their latest userconfig.View. + Configs map[string]userconfig.View `json:"configs"` +} + +// GetLatestConfigID returns the last config ID from a set of configs. +func (c ConfigsResponse) GetLatestConfigID() userconfig.ID { + latest := c.since + for _, config := range c.Configs { + if config.ID > latest { + latest = config.ID + } + } + return latest +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/config.go new file mode 100644 index 000000000000..9ec1d68b6489 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/config.go @@ -0,0 +1,19 @@ +package configs + +import ( + "flag" + + "github.com/cortexproject/cortex/pkg/configs/api" + "github.com/cortexproject/cortex/pkg/configs/db" +) + +type Config struct { + DB db.Config `yaml:"database"` + API api.Config `yaml:"api"` +} + +// RegisterFlags adds the flags required to configure this to the given FlagSet. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.DB.RegisterFlags(f) + cfg.API.RegisterFlags(f) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go new file mode 100644 index 000000000000..0ec290494c79 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/db.go @@ -0,0 +1,96 @@ +package db + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "net/url" + + "github.com/cortexproject/cortex/pkg/configs/db/memory" + "github.com/cortexproject/cortex/pkg/configs/db/postgres" + "github.com/cortexproject/cortex/pkg/configs/userconfig" +) + +// Config configures the database. +type Config struct { + URI string `yaml:"uri"` + MigrationsDir string `yaml:"migrations_dir"` + PasswordFile string `yaml:"password_file"` + + // Allow injection of mock DBs for unit testing. + Mock DB `yaml:"-"` +} + +// RegisterFlags adds the flags required to configure this to the given FlagSet.
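// Editor's sketch (assumed usage, not part of the vendored diff): polling
// rules through the Client above and advancing the since-ID high-water mark,
// the same bookkeeping GetLatestConfigID performs for alerts. The URL is a
// placeholder.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cortexproject/cortex/pkg/configs/client"
	"github.com/cortexproject/cortex/pkg/configs/userconfig"
)

func main() {
	var cfg client.Config
	if err := cfg.ConfigsAPIURL.Set("http://configs.example"); err != nil {
		panic(err)
	}
	cfg.ClientTimeout = 5 * time.Second

	c, err := client.New(cfg)
	if err != nil {
		panic(err)
	}

	var since userconfig.ID // zero fetches everything
	rules, err := c.GetRules(context.Background(), since)
	if err != nil {
		panic(err)
	}
	for user, vrc := range rules {
		if vrc.ID > since {
			since = vrc.ID // pass this on the next poll
		}
		fmt.Println(user, vrc.ID)
	}
}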
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.URI, "configs.database.uri", "postgres://postgres@configs-db.weave.local/configs?sslmode=disable", "URI where the database can be found (for dev you can use memory://)") + f.StringVar(&cfg.MigrationsDir, "configs.database.migrations-dir", "", "Path where the database migration files can be found") + f.StringVar(&cfg.PasswordFile, "configs.database.password-file", "", "File containing password (username goes in URI)") +} + +// DB is the interface for the database. +type DB interface { + // GetRulesConfig gets the user's ruler config + GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) + + // SetRulesConfig does a compare-and-swap (CAS) on the user's rules config. + // `oldConfig` must precisely match the current config in order to change the config to `newConfig`. + // Will return `true` if the config was updated, `false` otherwise. + SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) + + // GetAllRulesConfigs gets all of the ruler configs + GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) + + // GetRulesConfigs gets all of the configs that have been added or have + // changed since the provided config. + GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) + + GetConfig(ctx context.Context, userID string) (userconfig.View, error) + SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error + + GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) + GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) + + DeactivateConfig(ctx context.Context, userID string) error + RestoreConfig(ctx context.Context, userID string) error + + Close() error +} + +// New creates a new database. 
+func New(cfg Config) (DB, error) { + if cfg.Mock != nil { + return cfg.Mock, nil + } + + u, err := url.Parse(cfg.URI) + if err != nil { + return nil, err + } + + if len(cfg.PasswordFile) != 0 { + if u.User == nil { + return nil, fmt.Errorf("--database.password-file requires username in --database.uri") + } + passwordBytes, err := ioutil.ReadFile(cfg.PasswordFile) + if err != nil { + return nil, fmt.Errorf("Could not read database password file: %v", err) + } + u.User = url.UserPassword(u.User.Username(), string(passwordBytes)) + } + + var d DB + switch u.Scheme { + case "memory": + d, err = memory.New(u.String(), cfg.MigrationsDir) + case "postgres": + d, err = postgres.New(u.String(), cfg.MigrationsDir) + default: + return nil, fmt.Errorf("Unknown database type: %s", u.Scheme) + } + if err != nil { + return nil, err + } + return traced{timed{d}}, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go new file mode 100644 index 000000000000..a759c425bd03 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/memory/memory.go @@ -0,0 +1,145 @@ +package memory + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/cortexproject/cortex/pkg/configs/userconfig" +) + +// DB is an in-memory database for testing and local development. +type DB struct { + cfgs map[string]userconfig.View + id uint +} + +// New creates a new in-memory database. +func New(_, _ string) (*DB, error) { + return &DB{ + cfgs: map[string]userconfig.View{}, + id: 0, + }, nil +} + +// GetConfig gets the user's configuration. +func (d *DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) { + c, ok := d.cfgs[userID] + if !ok { + return userconfig.View{}, sql.ErrNoRows + } + return c, nil +} + +// SetConfig sets configuration for a user. +func (d *DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error { + if !cfg.RulesConfig.FormatVersion.IsValid() { + return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion) + } + d.cfgs[userID] = userconfig.View{Config: cfg, ID: userconfig.ID(d.id)} + d.id++ + return nil +} + +// GetAllConfigs gets all of the configs. +func (d *DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) { + return d.cfgs, nil +} + +// GetConfigs gets all of the configs that have changed recently.
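// Editor's sketch (assumed usage): New above dispatches on the URI scheme, so
// tests and local development can use memory:// while production uses
// postgres://. RuleFormatV2 is assumed to be the valid userconfig format
// version constant.
package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/configs/db"
	"github.com/cortexproject/cortex/pkg/configs/userconfig"
)

func main() {
	d, err := db.New(db.Config{URI: "memory://"})
	if err != nil {
		panic(err)
	}
	defer d.Close()

	ctx := context.Background()
	cfg := userconfig.Config{RulesConfig: userconfig.RulesConfig{FormatVersion: userconfig.RuleFormatV2}}
	if err := d.SetConfig(ctx, "user1", cfg); err != nil {
		panic(err)
	}
	view, err := d.GetConfig(ctx, "user1")
	fmt.Println(view.ID, err) // 0 <nil>: the first stored config gets ID 0
}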
+func (d *DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) { + cfgs := map[string]userconfig.View{} + for user, c := range d.cfgs { + if c.ID > since { + cfgs[user] = c + } + } + return cfgs, nil +} + +// SetDeletedAtConfig sets deletedAt for a configuration by adding a single +// new row with deleted_at set; like SetConfig, this is actually an insert. +func (d *DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt time.Time) error { + cv, err := d.GetConfig(ctx, userID) + if err != nil { + return err + } + cv.DeletedAt = deletedAt + cv.ID = userconfig.ID(d.id) + d.cfgs[userID] = cv + d.id++ + return nil +} + +// DeactivateConfig deactivates configuration for a user by creating a new configuration with DeletedAt set to now. +func (d *DB) DeactivateConfig(ctx context.Context, userID string) error { + return d.SetDeletedAtConfig(ctx, userID, time.Now()) +} + +// RestoreConfig restores a deactivated configuration for a user by creating a new configuration with an empty DeletedAt. +func (d *DB) RestoreConfig(ctx context.Context, userID string) error { + return d.SetDeletedAtConfig(ctx, userID, time.Time{}) +} + +// Close finishes using the db. Noop. +func (d *DB) Close() error { + return nil +} + +// GetRulesConfig gets the rules config for a user. +func (d *DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) { + c, ok := d.cfgs[userID] + if !ok { + return userconfig.VersionedRulesConfig{}, sql.ErrNoRows + } + cfg := c.GetVersionedRulesConfig() + if cfg == nil { + return userconfig.VersionedRulesConfig{}, sql.ErrNoRows + } + return *cfg, nil +} + +// SetRulesConfig sets the rules config for a user. +func (d *DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) { + c, ok := d.cfgs[userID] + if !ok { + return true, d.SetConfig(ctx, userID, userconfig.Config{RulesConfig: newConfig}) + } + if !oldConfig.Equal(c.Config.RulesConfig) { + return false, nil + } + return true, d.SetConfig(ctx, userID, userconfig.Config{ + AlertmanagerConfig: c.Config.AlertmanagerConfig, + RulesConfig: newConfig, + }) +} + +// GetAllRulesConfigs gets the rules configs for all users that have them. +func (d *DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) { + cfgs := map[string]userconfig.VersionedRulesConfig{} + for user, c := range d.cfgs { + cfg := c.GetVersionedRulesConfig() + if cfg != nil { + cfgs[user] = *cfg + } + } + return cfgs, nil +} + +// GetRulesConfigs gets the rules configs that have changed +// since the given config version.
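// Editor's sketch (assumed semantics): SetRulesConfig above is a
// compare-and-swap. When no config exists the write always goes through;
// afterwards a writer must present the currently stored RulesConfig (assumed
// here to be compared by RulesConfig.Equal) or its write is refused.
package main

import (
	"context"
	"fmt"

	"github.com/cortexproject/cortex/pkg/configs/db/memory"
	"github.com/cortexproject/cortex/pkg/configs/userconfig"
)

func main() {
	d, err := memory.New("", "")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	v1 := userconfig.RulesConfig{FormatVersion: userconfig.RuleFormatV2}
	updated, err := d.SetRulesConfig(ctx, "user1", userconfig.RulesConfig{}, v1)
	fmt.Println(updated, err) // true <nil>: the first write always wins

	// A second writer still holding the empty oldConfig loses the swap.
	updated, err = d.SetRulesConfig(ctx, "user1", userconfig.RulesConfig{}, v1)
	fmt.Println(updated, err) // false <nil>
}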
+func (d *DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) { + cfgs := map[string]userconfig.VersionedRulesConfig{} + for user, c := range d.cfgs { + if c.ID <= since { + continue + } + cfg := c.GetVersionedRulesConfig() + if cfg != nil { + cfgs[user] = *cfg + } + } + return cfgs, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go new file mode 100644 index 000000000000..6476891f6255 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go @@ -0,0 +1,358 @@ +package postgres + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/cortexproject/cortex/pkg/configs/userconfig" + + "github.com/Masterminds/squirrel" + "github.com/go-kit/kit/log/level" + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/postgres" // Import the postgres migrations driver + _ "github.com/golang-migrate/migrate/v4/source/file" // Import the postgres migrations driver + "github.com/lib/pq" + _ "github.com/lib/pq" // Import the postgres sql driver + "github.com/pkg/errors" + + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // TODO: These are a legacy from when configs was more general. Update the + // schema so this isn't needed. + entityType = "org" + subsystem = "cortex" + // timeout waiting for database connection to be established + dbTimeout = 5 * time.Minute +) + +var ( + allConfigs = squirrel.Eq{ + "owner_type": entityType, + "subsystem": subsystem, + } +) + +// DB is a postgres db, for dev and production +type DB struct { + dbProxy + squirrel.StatementBuilderType +} + +type dbProxy interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row + Prepare(query string) (*sql.Stmt, error) +} + +// dbWait waits for database connection to be established +func dbWait(db *sql.DB) error { + deadline := time.Now().Add(dbTimeout) + var err error + for tries := 0; time.Now().Before(deadline); tries++ { + err = db.Ping() + if err == nil { + return nil + } + level.Warn(util.Logger).Log("msg", "db connection not established, retrying...", "error", err) + time.Sleep(time.Second << uint(tries)) + } + return errors.Wrapf(err, "db connection not established after %s", dbTimeout) +} + +// New creates a new postgres DB +func New(uri, migrationsDir string) (DB, error) { + db, err := sql.Open("postgres", uri) + if err != nil { + return DB{}, errors.Wrap(err, "cannot open postgres db") + } + + if err := dbWait(db); err != nil { + return DB{}, errors.Wrap(err, "cannot establish db connection") + } + + if migrationsDir != "" { + // Add file scheme if no scheme is present + if !strings.HasPrefix(migrationsDir, "file:") { + migrationsDir = "file:" + migrationsDir + } + + m, err := migrate.New(migrationsDir, uri) + if err != nil { + return DB{}, errors.Wrap(err, "database migrations initialization failed") + } + + level.Info(util.Logger).Log("msg", "running database migrations...") + + if err := m.Up(); err != nil { + if err != migrate.ErrNoChange { + return DB{}, errors.Wrap(err, "database migrations failed") + } + level.Debug(util.Logger).Log("msg", "no change in schema, error (ignored)", "error", err) + } + } + + return DB{ + dbProxy: db, + StatementBuilderType: statementBuilder(db), + }, 
err +} + +var statementBuilder = squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar).RunWith + +func (d DB) findConfigs(filter squirrel.Sqlizer) (map[string]userconfig.View, error) { + rows, err := d.Select("id", "owner_id", "config", "deleted_at"). + Options("DISTINCT ON (owner_id)"). + From("configs"). + Where(filter). + OrderBy("owner_id, id DESC"). + Query() + if err != nil { + return nil, err + } + defer rows.Close() + cfgs := map[string]userconfig.View{} + for rows.Next() { + var cfg userconfig.View + var cfgBytes []byte + var userID string + var deletedAt pq.NullTime + err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &deletedAt) + if err != nil { + return nil, err + } + err = json.Unmarshal(cfgBytes, &cfg.Config) + if err != nil { + return nil, err + } + cfg.DeletedAt = deletedAt.Time + cfgs[userID] = cfg + } + return cfgs, nil +} + +// GetConfig gets a configuration. +func (d DB) GetConfig(ctx context.Context, userID string) (userconfig.View, error) { + var cfgView userconfig.View + var cfgBytes []byte + var deletedAt pq.NullTime + err := d.Select("id", "config", "deleted_at"). + From("configs"). + Where(squirrel.And{allConfigs, squirrel.Eq{"owner_id": userID}}). + OrderBy("id DESC"). + Limit(1). + QueryRow().Scan(&cfgView.ID, &cfgBytes, &deletedAt) + if err != nil { + return cfgView, err + } + cfgView.DeletedAt = deletedAt.Time + err = json.Unmarshal(cfgBytes, &cfgView.Config) + return cfgView, err +} + +// SetConfig sets a configuration. +func (d DB) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error { + if !cfg.RulesConfig.FormatVersion.IsValid() { + return fmt.Errorf("invalid rule format version %v", cfg.RulesConfig.FormatVersion) + } + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return err + } + + _, err = d.Insert("configs"). + Columns("owner_id", "owner_type", "subsystem", "config"). + Values(userID, entityType, subsystem, cfgBytes). + Exec() + return err +} + +// GetAllConfigs gets all of the configs. +func (d DB) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) { + return d.findConfigs(allConfigs) +} + +// GetConfigs gets all of the configs that have changed recently. +func (d DB) GetConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.View, error) { + return d.findConfigs(squirrel.And{ + allConfigs, + squirrel.Gt{"id": since}, + }) +} + +// GetRulesConfig gets the latest rules config for a user. +func (d DB) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) { + current, err := d.GetConfig(ctx, userID) + if err != nil { + return userconfig.VersionedRulesConfig{}, err + } + cfg := current.GetVersionedRulesConfig() + if cfg == nil { + return userconfig.VersionedRulesConfig{}, sql.ErrNoRows + } + return *cfg, nil +} + +// SetRulesConfig sets the current rules config for a user. +func (d DB) SetRulesConfig(ctx context.Context, userID string, oldConfig, newConfig userconfig.RulesConfig) (bool, error) { + updated := false + err := d.Transaction(func(tx DB) error { + current, err := d.GetConfig(ctx, userID) + if err != nil && err != sql.ErrNoRows { + return err + } + // The supplied oldConfig must match the current config. If no config + // exists, then oldConfig must be nil. Otherwise, it must exactly + // equal the existing config.
+ if !((err == sql.ErrNoRows && oldConfig.Files == nil) || oldConfig.Equal(current.Config.RulesConfig)) { + return nil + } + new := userconfig.Config{ + AlertmanagerConfig: current.Config.AlertmanagerConfig, + RulesConfig: newConfig, + } + updated = true + return d.SetConfig(ctx, userID, new) + }) + return updated, err +} + +// findRulesConfigs helps GetAllRulesConfigs and GetRulesConfigs retrieve the +// set of all active rules configurations across all our users. +func (d DB) findRulesConfigs(filter squirrel.Sqlizer) (map[string]userconfig.VersionedRulesConfig, error) { + rows, err := d.Select("id", "owner_id", "config ->> 'rules_files'", "config ->> 'rule_format_version'", "deleted_at"). + Options("DISTINCT ON (owner_id)"). + From("configs"). + Where(filter). + // `->>` gets a JSON object field as text. When a config row exists + // and alertmanager config is provided but ruler config has not yet + // been, the 'rules_files' key will have an empty JSON object as its + // value. This is (probably) the most efficient way to test for a + // non-empty `rules_files` key. + // + // This whole situation is way too complicated. See + // https://github.com/cortexproject/cortex/issues/619 for the whole + // story, and our plans to improve it. + Where("config ->> 'rules_files' <> '{}'"). + OrderBy("owner_id, id DESC"). + Query() + if err != nil { + return nil, err + } + defer rows.Close() + cfgs := map[string]userconfig.VersionedRulesConfig{} + for rows.Next() { + var cfg userconfig.VersionedRulesConfig + var userID string + var cfgBytes []byte + var rfvBytes []byte + var deletedAt pq.NullTime + err = rows.Scan(&cfg.ID, &userID, &cfgBytes, &rfvBytes, &deletedAt) + if err != nil { + return nil, err + } + err = json.Unmarshal(cfgBytes, &cfg.Config.Files) + if err != nil { + return nil, err + } + // Legacy configs don't have a rule format version, in which case this will + // be a zero-length (but non-nil) slice. + if len(rfvBytes) > 0 { + err = json.Unmarshal([]byte(`"`+string(rfvBytes)+`"`), &cfg.Config.FormatVersion) + if err != nil { + return nil, err + } + } + cfg.DeletedAt = deletedAt.Time + cfgs[userID] = cfg + } + return cfgs, nil +} + +// GetAllRulesConfigs gets all rules configs for all users. +func (d DB) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) { + return d.findRulesConfigs(allConfigs) +} + +// GetRulesConfigs gets all the rules configs that have changed since a given config. +func (d DB) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) { + return d.findRulesConfigs(squirrel.And{ + allConfigs, + squirrel.Gt{"id": since}, + }) +} + +// SetDeletedAtConfig sets deletedAt for a configuration by adding a single +// new row with deleted_at set; like SetConfig, this is actually an insert. +func (d DB) SetDeletedAtConfig(ctx context.Context, userID string, deletedAt pq.NullTime, cfg userconfig.Config) error { + cfgBytes, err := json.Marshal(cfg) + if err != nil { + return err + } + _, err = d.Insert("configs"). + Columns("owner_id", "owner_type", "subsystem", "deleted_at", "config"). + Values(userID, entityType, subsystem, deletedAt, cfgBytes). + Exec() + return err +} + +// DeactivateConfig deactivates a configuration.
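// Editor's sketch: the squirrel builder used above renders plain SQL with $n
// placeholders; this standalone snippet shows roughly the statement that
// findConfigs issues (same columns, table, and filter as in the diff).
package main

import (
	"fmt"

	"github.com/Masterminds/squirrel"
)

func main() {
	builder := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar)
	sql, args, err := builder.
		Select("id", "owner_id", "config", "deleted_at").
		Options("DISTINCT ON (owner_id)").
		From("configs").
		Where(squirrel.Eq{"owner_type": "org", "subsystem": "cortex"}).
		OrderBy("owner_id, id DESC").
		ToSql()
	fmt.Println(sql)       // SELECT DISTINCT ON (owner_id) id, owner_id, config, deleted_at FROM configs WHERE owner_type = $1 AND subsystem = $2 ORDER BY owner_id, id DESC
	fmt.Println(args, err) // [org cortex] <nil>
}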
+func (d DB) DeactivateConfig(ctx context.Context, userID string) error { + cfg, err := d.GetConfig(ctx, userID) + if err != nil { + return err + } + return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{Time: time.Now(), Valid: true}, cfg.Config) +} + +// RestoreConfig restores configuration. +func (d DB) RestoreConfig(ctx context.Context, userID string) error { + cfg, err := d.GetConfig(ctx, userID) + if err != nil { + return err + } + return d.SetDeletedAtConfig(ctx, userID, pq.NullTime{}, cfg.Config) +} + +// Transaction runs the given function in a postgres transaction. If fn returns +// an error the txn will be rolled back. +func (d DB) Transaction(f func(DB) error) error { + if _, ok := d.dbProxy.(*sql.Tx); ok { + // Already in a nested transaction + return f(d) + } + + tx, err := d.dbProxy.(*sql.DB).Begin() + if err != nil { + return err + } + err = f(DB{ + dbProxy: tx, + StatementBuilderType: statementBuilder(tx), + }) + if err != nil { + // Rollback error is ignored as we already have one in progress + if err2 := tx.Rollback(); err2 != nil { + level.Warn(util.Logger).Log("msg", "transaction rollback error (ignored)", "error", err2) + } + return err + } + return tx.Commit() +} + +// Close finishes using the db +func (d DB) Close() error { + if db, ok := d.dbProxy.(interface { + Close() error + }); ok { + return db.Close() + } + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go new file mode 100644 index 000000000000..58cbfdc9e194 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/timed.go @@ -0,0 +1,128 @@ +package db + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + "github.com/weaveworks/common/instrument" + + "github.com/cortexproject/cortex/pkg/configs/userconfig" +) + +var ( + databaseRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "database_request_duration_seconds", + Help: "Time spent (in seconds) doing database requests.", + Buckets: prometheus.DefBuckets, + }, []string{"method", "status_code"})) +) + +func init() { + databaseRequestDuration.Register() +} + +// timed adds prometheus timings to another database implementation +type timed struct { + d DB +} + +func (t timed) GetConfig(ctx context.Context, userID string) (userconfig.View, error) { + var cfg userconfig.View + err := instrument.CollectedRequest(ctx, "DB.GetConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + cfg, err = t.d.GetConfig(ctx, userID) // Warning: this will produce an incorrect result if the configID ever overflows + return err + }) + return cfg, err +} + +func (t timed) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) error { + return instrument.CollectedRequest(ctx, "DB.SetConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return t.d.SetConfig(ctx, userID, cfg) // Warning: this will produce an incorrect result if the configID ever overflows + }) +} + +func (t timed) GetAllConfigs(ctx context.Context) (map[string]userconfig.View, error) { + var cfgs map[string]userconfig.View + err := instrument.CollectedRequest(ctx, "DB.GetAllConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + cfgs, err = t.d.GetAllConfigs(ctx) + return err + }) + + return cfgs, err +} + +func (t timed) GetConfigs(ctx 
context.Context, since userconfig.ID) (map[string]userconfig.View, error) { + var cfgs map[string]userconfig.View + err := instrument.CollectedRequest(ctx, "DB.GetConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + cfgs, err = t.d.GetConfigs(ctx, since) + return err + }) + + return cfgs, err +} + +func (t timed) DeactivateConfig(ctx context.Context, userID string) error { + return instrument.CollectedRequest(ctx, "DB.DeactivateConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return t.d.DeactivateConfig(ctx, userID) + }) +} + +func (t timed) RestoreConfig(ctx context.Context, userID string) (err error) { + return instrument.CollectedRequest(ctx, "DB.RestoreConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return t.d.RestoreConfig(ctx, userID) + }) +} + +func (t timed) Close() error { + return instrument.CollectedRequest(context.Background(), "DB.Close", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + return t.d.Close() + }) +} + +func (t timed) GetRulesConfig(ctx context.Context, userID string) (userconfig.VersionedRulesConfig, error) { + var cfg userconfig.VersionedRulesConfig + err := instrument.CollectedRequest(ctx, "DB.GetRulesConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + cfg, err = t.d.GetRulesConfig(ctx, userID) + return err + }) + + return cfg, err +} + +func (t timed) SetRulesConfig(ctx context.Context, userID string, oldCfg, newCfg userconfig.RulesConfig) (bool, error) { + var updated bool + err := instrument.CollectedRequest(ctx, "DB.SetRulesConfig", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + updated, err = t.d.SetRulesConfig(ctx, userID, oldCfg, newCfg) + return err + }) + + return updated, err +} + +func (t timed) GetAllRulesConfigs(ctx context.Context) (map[string]userconfig.VersionedRulesConfig, error) { + var cfgs map[string]userconfig.VersionedRulesConfig + err := instrument.CollectedRequest(ctx, "DB.GetAllRulesConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + cfgs, err = t.d.GetAllRulesConfigs(ctx) + return err + }) + + return cfgs, err +} + +func (t timed) GetRulesConfigs(ctx context.Context, since userconfig.ID) (map[string]userconfig.VersionedRulesConfig, error) { + var cfgs map[string]userconfig.VersionedRulesConfig + err := instrument.CollectedRequest(ctx, "DB.GetRulesConfigs", databaseRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { + var err error + cfgs, err = t.d.GetRulesConfigs(ctx, since) + return err + }) + + return cfgs, err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go new file mode 100644 index 000000000000..7a2cc3aac613 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go @@ -0,0 +1,76 @@ +package db + +import ( + "context" + "fmt" + + "github.com/cortexproject/cortex/pkg/configs/userconfig" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/util" +) + +// traced adds log trace lines on each db call +type traced struct { + d DB +} + +func (t traced) trace(name string, args ...interface{}) { + level.Debug(util.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) +} + +func (t traced) GetConfig(ctx context.Context, userID string) 
(cfg userconfig.View, err error) { + defer func() { t.trace("GetConfig", userID, cfg, err) }() + return t.d.GetConfig(ctx, userID) +} + +func (t traced) SetConfig(ctx context.Context, userID string, cfg userconfig.Config) (err error) { + defer func() { t.trace("SetConfig", userID, cfg, err) }() + return t.d.SetConfig(ctx, userID, cfg) +} + +func (t traced) GetAllConfigs(ctx context.Context) (cfgs map[string]userconfig.View, err error) { + defer func() { t.trace("GetAllConfigs", cfgs, err) }() + return t.d.GetAllConfigs(ctx) +} + +func (t traced) GetConfigs(ctx context.Context, since userconfig.ID) (cfgs map[string]userconfig.View, err error) { + defer func() { t.trace("GetConfigs", since, cfgs, err) }() + return t.d.GetConfigs(ctx, since) +} + +func (t traced) DeactivateConfig(ctx context.Context, userID string) (err error) { + defer func() { t.trace("DeactivateConfig", userID, err) }() + return t.d.DeactivateConfig(ctx, userID) +} + +func (t traced) RestoreConfig(ctx context.Context, userID string) (err error) { + defer func() { t.trace("RestoreConfig", userID, err) }() + return t.d.RestoreConfig(ctx, userID) +} + +func (t traced) Close() (err error) { + defer func() { t.trace("Close", err) }() + return t.d.Close() +} + +func (t traced) GetRulesConfig(ctx context.Context, userID string) (cfg userconfig.VersionedRulesConfig, err error) { + defer func() { t.trace("GetRulesConfig", userID, cfg, err) }() + return t.d.GetRulesConfig(ctx, userID) +} + +func (t traced) SetRulesConfig(ctx context.Context, userID string, oldCfg, newCfg userconfig.RulesConfig) (updated bool, err error) { + defer func() { t.trace("SetRulesConfig", userID, oldCfg, newCfg, updated, err) }() + return t.d.SetRulesConfig(ctx, userID, oldCfg, newCfg) +} + +func (t traced) GetAllRulesConfigs(ctx context.Context) (cfgs map[string]userconfig.VersionedRulesConfig, err error) { + defer func() { t.trace("GetAllRulesConfigs", cfgs, err) }() + return t.d.GetAllRulesConfigs(ctx) +} + +func (t traced) GetRulesConfigs(ctx context.Context, since userconfig.ID) (cfgs map[string]userconfig.VersionedRulesConfig, err error) { + defer func() { t.trace("GetConfigs", since, cfgs, err) }() + return t.d.GetRulesConfigs(ctx, since) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go new file mode 100644 index 000000000000..5e1f2989f722 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/ast.go @@ -0,0 +1,341 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//nolint //Since this was copied from Prometheus leave it as is +package promql + +import ( + "fmt" + "time" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" +) + +// Node is a generic interface for all nodes in an AST. +// +// Whenever numerous nodes are listed such as in a switch-case statement +// or a chain of function definitions (e.g. 
String(), expr(), etc.) convention is +// to list them as follows: +// +// - Statements +// - statement types (alphabetical) +// - ... +// - Expressions +// - expression types (alphabetical) +// - ... +// +type Node interface { + // String representation of the node that returns the given node when parsed + // as part of a valid query. + String() string +} + +// Statement is a generic interface for all statements. +type Statement interface { + Node + + // stmt ensures that no other type accidentally implements the interface + stmt() +} + +// Statements is a list of statement nodes that implements Node. +type Statements []Statement + +// AlertStmt represents an added alert rule. +type AlertStmt struct { + Name string + Expr Expr + Duration time.Duration + Labels labels.Labels + Annotations labels.Labels +} + +// EvalStmt holds an expression and information on the range it should +// be evaluated on. +type EvalStmt struct { + Expr Expr // Expression to be evaluated. + + // The time boundaries for the evaluation. If Start equals End an instant + // is evaluated. + Start, End time.Time + // Time between two evaluated instants for the range [Start:End]. + Interval time.Duration +} + +// RecordStmt represents an added recording rule. +type RecordStmt struct { + Name string + Expr Expr + Labels labels.Labels +} + +func (*AlertStmt) stmt() {} +func (*EvalStmt) stmt() {} +func (*RecordStmt) stmt() {} + +// Expr is a generic interface for all expression types. +type Expr interface { + Node + + // Type returns the type the expression evaluates to. It does not perform + // in-depth checks as this is done at parsing-time. + Type() ValueType + // expr ensures that no other types accidentally implement the interface. + expr() +} + +// Expressions is a list of expression nodes that implements Node. +type Expressions []Expr + +// AggregateExpr represents an aggregation operation on a Vector. +type AggregateExpr struct { + Op ItemType // The used aggregation operation. + Expr Expr // The Vector expression over which is aggregated. + Param Expr // Parameter used by some aggregators. + Grouping []string // The labels by which to group the Vector. + Without bool // Whether to drop the given labels rather than keep them. +} + +// BinaryExpr represents a binary expression between two child expressions. +type BinaryExpr struct { + Op ItemType // The operation of the expression. + LHS, RHS Expr // The operands on the respective sides of the operator. + + // The matching behavior for the operation if both operands are Vectors. + // If they are not this field is nil. + VectorMatching *VectorMatching + + // If a comparison operator, return 0/1 rather than filtering. + ReturnBool bool +} + +// Call represents a function call. +type Call struct { + Func *Function // The function that was called. + Args Expressions // Arguments used in the call. +} + +// MatrixSelector represents a Matrix selection. +type MatrixSelector struct { + Name string + Range time.Duration + Offset time.Duration + LabelMatchers []*labels.Matcher + + // The series are populated at query preparation time. + series []storage.Series +} + +// NumberLiteral represents a number. +type NumberLiteral struct { + Val float64 +} + +// ParenExpr wraps an expression so it cannot be disassembled as a consequence +// of operator precedence. +type ParenExpr struct { + Expr Expr +} + +// StringLiteral represents a string. +type StringLiteral struct { + Val string +} + +// UnaryExpr represents a unary operation on another expression. 
+// Currently unary operations are only supported for Scalars. +type UnaryExpr struct { + Op ItemType + Expr Expr +} + +// VectorSelector represents a Vector selection. +type VectorSelector struct { + Name string + Offset time.Duration + LabelMatchers []*labels.Matcher + + // The series are populated at query preparation time. + series []storage.Series +} + +func (e *AggregateExpr) Type() ValueType { return ValueTypeVector } +func (e *Call) Type() ValueType { return e.Func.ReturnType } +func (e *MatrixSelector) Type() ValueType { return ValueTypeMatrix } +func (e *NumberLiteral) Type() ValueType { return ValueTypeScalar } +func (e *ParenExpr) Type() ValueType { return e.Expr.Type() } +func (e *StringLiteral) Type() ValueType { return ValueTypeString } +func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() } +func (e *VectorSelector) Type() ValueType { return ValueTypeVector } +func (e *BinaryExpr) Type() ValueType { + if e.LHS.Type() == ValueTypeScalar && e.RHS.Type() == ValueTypeScalar { + return ValueTypeScalar + } + return ValueTypeVector +} + +func (*AggregateExpr) expr() {} +func (*BinaryExpr) expr() {} +func (*Call) expr() {} +func (*MatrixSelector) expr() {} +func (*NumberLiteral) expr() {} +func (*ParenExpr) expr() {} +func (*StringLiteral) expr() {} +func (*UnaryExpr) expr() {} +func (*VectorSelector) expr() {} + +// VectorMatchCardinality describes the cardinality relationship +// of two Vectors in a binary operation. +type VectorMatchCardinality int + +const ( + CardOneToOne VectorMatchCardinality = iota + CardManyToOne + CardOneToMany + CardManyToMany +) + +func (vmc VectorMatchCardinality) String() string { + switch vmc { + case CardOneToOne: + return "one-to-one" + case CardManyToOne: + return "many-to-one" + case CardOneToMany: + return "one-to-many" + case CardManyToMany: + return "many-to-many" + } + panic("promql.VectorMatchCardinality.String: unknown match cardinality") +} + +// VectorMatching describes how elements from two Vectors in a binary +// operation are supposed to be matched. +type VectorMatching struct { + // The cardinality of the two Vectors. + Card VectorMatchCardinality + // MatchingLabels contains the labels which define equality of a pair of + // elements from the Vectors. + MatchingLabels []string + // On includes the given label names from matching, + // rather than excluding them. + On bool + // Include contains additional labels that should be included in + // the result from the side with the lower cardinality. + Include []string +} + +// Visitor allows visiting a Node and its child nodes. The Visit method is +// invoked for each node with the path leading to the node provided additionally. +// If the result visitor w is not nil and no error, Walk visits each of the children +// of node with the visitor w, followed by a call of w.Visit(nil, nil). +type Visitor interface { + Visit(node Node, path []Node) (w Visitor, err error) +} + +// Walk traverses an AST in depth-first order: It starts by calling +// v.Visit(node, path); node must not be nil. If the visitor w returned by +// v.Visit(node, path) is not nil and the visitor returns no error, Walk is +// invoked recursively with visitor w for each of the non-nil children of node, +// followed by a call of w.Visit(nil), returning an error +// As the tree is descended the path of previous nodes is provided. 
+func Walk(v Visitor, node Node, path []Node) error { + var err error + if v, err = v.Visit(node, path); v == nil || err != nil { + return err + } + path = append(path, node) + + switch n := node.(type) { + case Statements: + for _, s := range n { + if err := Walk(v, s, path); err != nil { + return err + } + } + case *AlertStmt: + if err := Walk(v, n.Expr, path); err != nil { + return err + } + + case *EvalStmt: + if err := Walk(v, n.Expr, path); err != nil { + return err + } + + case *RecordStmt: + if err := Walk(v, n.Expr, path); err != nil { + return err + } + + case Expressions: + for _, e := range n { + if err := Walk(v, e, path); err != nil { + return err + } + } + case *AggregateExpr: + if err := Walk(v, n.Expr, path); err != nil { + return err + } + + case *BinaryExpr: + if err := Walk(v, n.LHS, path); err != nil { + return err + } + if err := Walk(v, n.RHS, path); err != nil { + return err + } + + case *Call: + if err := Walk(v, n.Args, path); err != nil { + return err + } + + case *ParenExpr: + if err := Walk(v, n.Expr, path); err != nil { + return err + } + + case *UnaryExpr: + if err := Walk(v, n.Expr, path); err != nil { + return err + } + + case *MatrixSelector, *NumberLiteral, *StringLiteral, *VectorSelector: + // nothing to do + + default: + panic(fmt.Errorf("promql.Walk: unhandled node type %T", node)) + } + + _, err = v.Visit(nil, nil) + return err +} + +type inspector func(Node, []Node) error + +func (f inspector) Visit(node Node, path []Node) (Visitor, error) { + if err := f(node, path); err == nil { + return f, nil + } else { + return nil, err + } +} + +// Inspect traverses an AST in depth-first order: It starts by calling +// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f +// for all the non-nil children of node, recursively. +func Inspect(node Node, f inspector) { + Walk(inspector(f), node, nil) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go new file mode 100644 index 000000000000..f47ee739f64f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/engine.go @@ -0,0 +1,1755 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
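// Editor's sketch (assumed usage): counting selectors in a parsed expression
// with the Inspect helper defined above; ParseExpr lives in this same
// vendored package.
package main

import (
	"fmt"

	promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql"
)

func main() {
	expr, err := promql.ParseExpr(`sum(rate(http_requests_total[5m])) / scalar(up)`)
	if err != nil {
		panic(err)
	}
	selectors := 0
	promql.Inspect(expr, func(node promql.Node, _ []promql.Node) error {
		switch node.(type) {
		case *promql.VectorSelector, *promql.MatrixSelector:
			selectors++
		}
		return nil
	})
	fmt.Println(selectors) // 2: http_requests_total[5m] and up
}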
+//nolint //Since this was copied from Prometheus leave it as is +package promql + +import ( + "container/heap" + "context" + "fmt" + "math" + "regexp" + "runtime" + "sort" + "strconv" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + opentracing "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/storage" + + "github.com/prometheus/prometheus/util/stats" +) + +const ( + namespace = "prometheus" + subsystem = "engine" + queryTag = "query" + + // The largest SampleValue that can be converted to an int64 without overflow. + maxInt64 = 9223372036854774784 + // The smallest SampleValue that can be converted to an int64 without underflow. + minInt64 = -9223372036854775808 +) + +type engineMetrics struct { + currentQueries prometheus.Gauge + maxConcurrentQueries prometheus.Gauge + queryQueueTime prometheus.Summary + queryPrepareTime prometheus.Summary + queryInnerEval prometheus.Summary + queryResultSort prometheus.Summary +} + +// convertibleToInt64 returns true if v does not over-/underflow an int64. +func convertibleToInt64(v float64) bool { + return v <= maxInt64 && v >= minInt64 +} + +type ( + // ErrQueryTimeout is returned if a query timed out during processing. + ErrQueryTimeout string + // ErrQueryCanceled is returned if a query was canceled during processing. + ErrQueryCanceled string + // ErrStorage is returned if an error was encountered in the storage layer + // during query handling. + ErrStorage error +) + +func (e ErrQueryTimeout) Error() string { return fmt.Sprintf("query timed out in %s", string(e)) } +func (e ErrQueryCanceled) Error() string { return fmt.Sprintf("query was canceled in %s", string(e)) } + +// A Query is derived from an a raw query string and can be run against an engine +// it is associated with. +type Query interface { + // Exec processes the query. Can only be called once. + Exec(ctx context.Context) *Result + // Close recovers memory used by the query result. + Close() + // Statement returns the parsed statement of the query. + Statement() Statement + // Stats returns statistics about the lifetime of the query. + Stats() *stats.TimerGroup + // Cancel signals that a running query execution should be aborted. + Cancel() +} + +// query implements the Query interface. +type query struct { + // Underlying data provider. + queryable storage.Queryable + // The original query string. + q string + // Statement of the parsed query. + stmt Statement + // Timer stats for the query execution. + stats *stats.TimerGroup + // Result matrix for reuse. + matrix Matrix + // Cancellation function for the query. + cancel func() + + // The engine against which the query is executed. + ng *Engine +} + +// Statement implements the Query interface. +func (q *query) Statement() Statement { + return q.stmt +} + +// Stats implements the Query interface. +func (q *query) Stats() *stats.TimerGroup { + return q.stats +} + +// Cancel implements the Query interface. +func (q *query) Cancel() { + if q.cancel != nil { + q.cancel() + } +} + +// Close implements the Query interface. +func (q *query) Close() { + for _, s := range q.matrix { + putPointSlice(s.Points) + } +} + +// Exec implements the Query interface. 
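// Editor's sketch: ErrQueryCanceled and ErrQueryTimeout above are plain
// string types, so callers can type-switch on a query's error to tell an
// aborted query from a slow one.
package main

import (
	"fmt"

	promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql"
)

func describe(err error) string {
	switch err.(type) {
	case promql.ErrQueryCanceled:
		return "canceled: " + err.Error()
	case promql.ErrQueryTimeout:
		return "timed out: " + err.Error()
	default:
		return "other: " + err.Error()
	}
}

func main() {
	fmt.Println(describe(promql.ErrQueryTimeout("query execution")))
	// timed out: query timed out in query execution
}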
+func (q *query) Exec(ctx context.Context) *Result { + if span := opentracing.SpanFromContext(ctx); span != nil { + span.SetTag(queryTag, q.stmt.String()) + } + + res, err := q.ng.exec(ctx, q) + return &Result{Err: err, Value: res} +} + +// contextDone returns an error if the context was canceled or timed out. +func contextDone(ctx context.Context, env string) error { + select { + case <-ctx.Done(): + err := ctx.Err() + switch err { + case context.Canceled: + return ErrQueryCanceled(env) + case context.DeadlineExceeded: + return ErrQueryTimeout(env) + default: + return err + } + default: + return nil + } +} + +// Engine handles the lifetime of queries from beginning to end. +// It is connected to a querier. +type Engine struct { + logger log.Logger + metrics *engineMetrics + timeout time.Duration + gate *queryGate +} + +// NewEngine returns a new engine. +func NewEngine(logger log.Logger, reg prometheus.Registerer, maxConcurrent int, timeout time.Duration) *Engine { + if logger == nil { + logger = log.NewNopLogger() + } + + metrics := &engineMetrics{ + currentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queries", + Help: "The current number of queries being executed or waiting.", + }), + maxConcurrentQueries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queries_concurrent_max", + Help: "The max number of concurrent queries.", + }), + queryQueueTime: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "queue_time"}, + }), + queryPrepareTime: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "prepare_time"}, + }), + queryInnerEval: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "inner_eval"}, + }), + queryResultSort: prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "query_duration_seconds", + Help: "Query timings", + ConstLabels: prometheus.Labels{"slice": "result_sort"}, + }), + } + metrics.maxConcurrentQueries.Set(float64(maxConcurrent)) + + if reg != nil { + reg.MustRegister( + metrics.currentQueries, + metrics.maxConcurrentQueries, + metrics.queryQueueTime, + metrics.queryPrepareTime, + metrics.queryInnerEval, + metrics.queryResultSort, + ) + } + return &Engine{ + gate: newQueryGate(maxConcurrent), + timeout: timeout, + logger: logger, + metrics: metrics, + } +} + +// NewInstantQuery returns an evaluation query for the given expression at the given time. +func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time) (Query, error) { + expr, err := ParseExpr(qs) + if err != nil { + return nil, err + } + qry := ng.newQuery(q, expr, ts, ts, 0) + qry.q = qs + + return qry, nil +} + +// NewRangeQuery returns an evaluation query for the given time range and with +// the resolution set by the interval. 
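Putting NewEngine and NewInstantQuery together (a sketch under assumptions: `q` is whatever storage.Queryable the caller provides, the metric name is illustrative, and a nil Registerer simply skips metric registration). NewRangeQuery, defined next, additionally checks that the expression yields a scalar or an instant vector:

```go
func instantQueryExample(ctx context.Context, q storage.Queryable) (Value, error) {
	eng := NewEngine(log.NewNopLogger(), nil, 20, 2*time.Minute)
	qry, err := eng.NewInstantQuery(q, `sum(rate(http_requests_total[5m]))`, time.Now())
	if err != nil {
		return nil, err // parse error in the expression
	}
	defer qry.Close()
	res := qry.Exec(ctx)
	return res.Value, res.Err
}
```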
+func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time.Time, interval time.Duration) (Query, error) { + expr, err := ParseExpr(qs) + if err != nil { + return nil, err + } + if expr.Type() != ValueTypeVector && expr.Type() != ValueTypeScalar { + return nil, fmt.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", documentedType(expr.Type())) + } + qry := ng.newQuery(q, expr, start, end, interval) + qry.q = qs + + return qry, nil +} + +func (ng *Engine) newQuery(q storage.Queryable, expr Expr, start, end time.Time, interval time.Duration) *query { + es := &EvalStmt{ + Expr: expr, + Start: start, + End: end, + Interval: interval, + } + qry := &query{ + stmt: es, + ng: ng, + stats: stats.NewTimerGroup(), + queryable: q, + } + return qry +} + +// testStmt is an internal helper statement that allows execution +// of an arbitrary function during handling. It is used to test the Engine. +type testStmt func(context.Context) error + +func (testStmt) String() string { return "test statement" } +func (testStmt) stmt() {} + +func (ng *Engine) newTestQuery(f func(context.Context) error) Query { + qry := &query{ + q: "test statement", + stmt: testStmt(f), + ng: ng, + stats: stats.NewTimerGroup(), + } + return qry +} + +// exec executes the query. +// +// At this point per query only one EvalStmt is evaluated. Alert and record +// statements are not handled by the Engine. +func (ng *Engine) exec(ctx context.Context, q *query) (Value, error) { + ng.metrics.currentQueries.Inc() + defer ng.metrics.currentQueries.Dec() + + ctx, cancel := context.WithTimeout(ctx, ng.timeout) + q.cancel = cancel + + execTimer := q.stats.GetTimer(stats.ExecTotalTime).Start() + defer execTimer.Stop() + queueTimer := q.stats.GetTimer(stats.ExecQueueTime).Start() + + if err := ng.gate.Start(ctx); err != nil { + return nil, err + } + defer ng.gate.Done() + + queueTimer.Stop() + ng.metrics.queryQueueTime.Observe(queueTimer.ElapsedTime().Seconds()) + + // Cancel when execution is done or an error was raised. + defer q.cancel() + + const env = "query execution" + + evalTimer := q.stats.GetTimer(stats.EvalTotalTime).Start() + defer evalTimer.Stop() + + // The base context might already be canceled on the first iteration (e.g. during shutdown). + if err := contextDone(ctx, env); err != nil { + return nil, err + } + + switch s := q.Statement().(type) { + case *EvalStmt: + return ng.execEvalStmt(ctx, q, s) + case testStmt: + return nil, s(ctx) + } + + panic(fmt.Errorf("promql.Engine.exec: unhandled statement of type %T", q.Statement())) +} + +func timeMilliseconds(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +func durationMilliseconds(d time.Duration) int64 { + return int64(d / (time.Millisecond / time.Nanosecond)) +} + +// execEvalStmt evaluates the expression of an evaluation statement for the given time range. +func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *EvalStmt) (Value, error) { + prepareTimer := query.stats.GetTimer(stats.QueryPreparationTime).Start() + querier, err := ng.populateSeries(ctx, query.queryable, s) + prepareTimer.Stop() + ng.metrics.queryPrepareTime.Observe(prepareTimer.ElapsedTime().Seconds()) + + // XXX(fabxc): the querier returned by populateSeries might be instantiated + // we must not return without closing irrespective of the error. + // TODO: make this semantically saner. 
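+	// (That is, populateSeries can return a usable querier alongside an
+	// error, so the querier must be closed in either case.)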
+ if querier != nil { + defer querier.Close() + } + + if err != nil { + return nil, err + } + + evalTimer := query.stats.GetTimer(stats.InnerEvalTime).Start() + // Instant evaluation. This is executed as a range evaluation with one step. + if s.Start == s.End && s.Interval == 0 { + start := timeMilliseconds(s.Start) + evaluator := &evaluator{ + startTimestamp: start, + endTimestamp: start, + interval: 1, + ctx: ctx, + logger: ng.logger, + } + val, err := evaluator.Eval(s.Expr) + if err != nil { + return nil, err + } + + evalTimer.Stop() + ng.metrics.queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds()) + + mat, ok := val.(Matrix) + if !ok { + panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) + } + query.matrix = mat + switch s.Expr.Type() { + case ValueTypeVector: + // Convert matrix with one value per series into vector. + vector := make(Vector, len(mat)) + for i, s := range mat { + // Point might have a different timestamp, force it to the evaluation + // timestamp as that is when we ran the evaluation. + vector[i] = Sample{Metric: s.Metric, Point: Point{V: s.Points[0].V, T: start}} + } + return vector, nil + case ValueTypeScalar: + return Scalar{V: mat[0].Points[0].V, T: start}, nil + case ValueTypeMatrix: + return mat, nil + default: + panic(fmt.Errorf("promql.Engine.exec: unexpected expression type %q", s.Expr.Type())) + } + + } + + // Range evaluation. + evaluator := &evaluator{ + startTimestamp: timeMilliseconds(s.Start), + endTimestamp: timeMilliseconds(s.End), + interval: durationMilliseconds(s.Interval), + ctx: ctx, + logger: ng.logger, + } + val, err := evaluator.Eval(s.Expr) + if err != nil { + return nil, err + } + evalTimer.Stop() + ng.metrics.queryInnerEval.Observe(evalTimer.ElapsedTime().Seconds()) + + mat, ok := val.(Matrix) + if !ok { + panic(fmt.Errorf("promql.Engine.exec: invalid expression type %q", val.Type())) + } + query.matrix = mat + + if err := contextDone(ctx, "expression evaluation"); err != nil { + return nil, err + } + + // TODO(fabxc): order ensured by storage? + // TODO(fabxc): where to ensure metric labels are a copy from the storage internals. 
+ sortTimer := query.stats.GetTimer(stats.ResultSortTime).Start() + sort.Sort(mat) + sortTimer.Stop() + + ng.metrics.queryResultSort.Observe(sortTimer.ElapsedTime().Seconds()) + return mat, nil +} + +func (ng *Engine) populateSeries(ctx context.Context, q storage.Queryable, s *EvalStmt) (storage.Querier, error) { + var maxOffset time.Duration + Inspect(s.Expr, func(node Node, _ []Node) error { + switch n := node.(type) { + case *VectorSelector: + if maxOffset < LookbackDelta { + maxOffset = LookbackDelta + } + if n.Offset+LookbackDelta > maxOffset { + maxOffset = n.Offset + LookbackDelta + } + case *MatrixSelector: + if maxOffset < n.Range { + maxOffset = n.Range + } + if n.Offset+n.Range > maxOffset { + maxOffset = n.Offset + n.Range + } + } + return nil + }) + + mint := s.Start.Add(-maxOffset) + + querier, err := q.Querier(ctx, timestamp.FromTime(mint), timestamp.FromTime(s.End)) + if err != nil { + return nil, err + } + + Inspect(s.Expr, func(node Node, path []Node) error { + var set storage.SeriesSet + params := &storage.SelectParams{ + Start: timestamp.FromTime(s.Start), + End: timestamp.FromTime(s.End), + Step: int64(s.Interval / time.Millisecond), + } + + switch n := node.(type) { + case *VectorSelector: + params.Start = params.Start - durationMilliseconds(LookbackDelta) + params.Func = extractFuncFromPath(path) + if n.Offset > 0 { + offsetMilliseconds := durationMilliseconds(n.Offset) + params.Start = params.Start - offsetMilliseconds + params.End = params.End - offsetMilliseconds + } + + set, _, err = querier.Select(params, n.LabelMatchers...) + if err != nil { + level.Error(ng.logger).Log("msg", "error selecting series set", "err", err) + return err + } + n.series, err = expandSeriesSet(ctx, set) + if err != nil { + // TODO(fabxc): use multi-error. + level.Error(ng.logger).Log("msg", "error expanding series set", "err", err) + return err + } + + case *MatrixSelector: + params.Func = extractFuncFromPath(path) + // For all matrix queries we want to ensure that we have (end-start) + range selected + // this way we have `range` data before the start time + params.Start = params.Start - durationMilliseconds(n.Range) + if n.Offset > 0 { + offsetMilliseconds := durationMilliseconds(n.Offset) + params.Start = params.Start - offsetMilliseconds + params.End = params.End - offsetMilliseconds + } + + set, _, err = querier.Select(params, n.LabelMatchers...) + if err != nil { + level.Error(ng.logger).Log("msg", "error selecting series set", "err", err) + return err + } + n.series, err = expandSeriesSet(ctx, set) + if err != nil { + level.Error(ng.logger).Log("msg", "error expanding series set", "err", err) + return err + } + } + return nil + }) + return querier, err +} + +// extractFuncFromPath walks up the path and searches for the first instance of +// a function or aggregation. +func extractFuncFromPath(p []Node) string { + if len(p) == 0 { + return "" + } + switch n := p[len(p)-1].(type) { + case *AggregateExpr: + return n.Op.String() + case *Call: + return n.Func.Name + case *BinaryExpr: + // If we hit a binary expression we terminate since we only care about functions + // or aggregations over a single metric. 
+ return "" + } + return extractFuncFromPath(p[:len(p)-1]) +} + +func expandSeriesSet(ctx context.Context, it storage.SeriesSet) (res []storage.Series, err error) { + for it.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + res = append(res, it.At()) + } + return res, it.Err() +} + +// An evaluator evaluates given expressions over given fixed timestamps. It +// is attached to an engine through which it connects to a querier and reports +// errors. On timeout or cancellation of its context it terminates. +type evaluator struct { + ctx context.Context + + startTimestamp int64 // Start time in milliseconds. + + endTimestamp int64 // End time in milliseconds. + interval int64 // Interval in milliseconds. + + logger log.Logger +} + +// errorf causes a panic with the input formatted into an error. +func (ev *evaluator) errorf(format string, args ...interface{}) { + ev.error(fmt.Errorf(format, args...)) +} + +// error causes a panic with the given error. +func (ev *evaluator) error(err error) { + panic(err) +} + +// recover is the handler that turns panics into returns from the top level of evaluation. +func (ev *evaluator) recover(errp *error) { + e := recover() + if e == nil { + return + } + if err, ok := e.(runtime.Error); ok { + // Print the stack trace but do not inhibit the running application. + buf := make([]byte, 64<<10) + buf = buf[:runtime.Stack(buf, false)] + + level.Error(ev.logger).Log("msg", "runtime panic in parser", "err", e, "stacktrace", string(buf)) + *errp = fmt.Errorf("unexpected error: %s", err) + } else { + *errp = e.(error) + } +} + +func (ev *evaluator) Eval(expr Expr) (v Value, err error) { + defer ev.recover(&err) + return ev.eval(expr), nil +} + +// EvalNodeHelper stores extra information and caches for evaluating a single node across steps. +type EvalNodeHelper struct { + // Evaluation timestamp. + ts int64 + // Vector that can be used for output. + out Vector + + // Caches. + // dropMetricName and label_*. + dmn map[uint64]labels.Labels + // signatureFunc. + sigf map[uint64]uint64 + // funcHistogramQuantile. + signatureToMetricWithBuckets map[uint64]*metricWithBuckets + // label_replace. + regex *regexp.Regexp + + // For binary vector matching. + rightSigs map[uint64]Sample + matchedSigs map[uint64]map[uint64]struct{} + resultMetric map[uint64]labels.Labels +} + +// dropMetricName is a cached version of dropMetricName. +func (enh *EvalNodeHelper) dropMetricName(l labels.Labels) labels.Labels { + if enh.dmn == nil { + enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) + } + h := l.Hash() + ret, ok := enh.dmn[h] + if ok { + return ret + } + ret = dropMetricName(l) + enh.dmn[h] = ret + return ret +} + +// signatureFunc is a cached version of signatureFunc. +func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { + if enh.sigf == nil { + enh.sigf = make(map[uint64]uint64, len(enh.out)) + } + f := signatureFunc(on, names...) + return func(l labels.Labels) uint64 { + h := l.Hash() + ret, ok := enh.sigf[h] + if ok { + return ret + } + ret = f(l) + enh.sigf[h] = ret + return ret + } +} + +// rangeEval evaluates the given expressions, and then for each step calls +// the given function with the values computed for each expression at that +// step. The return value is the combination into time series of of all the +// function call results. 
+func (ev *evaluator) rangeEval(f func([]Value, *EvalNodeHelper) Vector, exprs ...Expr) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + matrixes := make([]Matrix, len(exprs)) + origMatrixes := make([]Matrix, len(exprs)) + for i, e := range exprs { + // Functions will take string arguments from the expressions, not the values. + if e != nil && e.Type() != ValueTypeString { + matrixes[i] = ev.eval(e).(Matrix) + + // Keep a copy of the original point slices so that they + // can be returned to the pool. + origMatrixes[i] = make(Matrix, len(matrixes[i])) + copy(origMatrixes[i], matrixes[i]) + } + } + + vectors := make([]Vector, len(exprs)) // Input vectors for the function. + args := make([]Value, len(exprs)) // Argument to function. + // Create an output vector that is as big as the input matrix with + // the most time series. + biggestLen := 1 + for i := range exprs { + vectors[i] = make(Vector, 0, len(matrixes[i])) + if len(matrixes[i]) > biggestLen { + biggestLen = len(matrixes[i]) + } + } + enh := &EvalNodeHelper{out: make(Vector, 0, biggestLen)} + seriess := make(map[uint64]Series, biggestLen) // Output series by series hash. + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + // Gather input vectors for this timestamp. + for i := range exprs { + vectors[i] = vectors[i][:0] + for si, series := range matrixes[i] { + for _, point := range series.Points { + if point.T == ts { + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, Point: point}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + matrixes[i][si].Points = series.Points[1:] + } + break + } + } + args[i] = vectors[i] + } + // Make the function call. + enh.ts = ts + result := f(args, enh) + enh.out = result[:0] // Reuse result vector. + // If this could be an instant query, shortcut so as not to change sort order. + if ev.endTimestamp == ev.startTimestamp { + mat := make(Matrix, len(result)) + for i, s := range result { + s.Point.T = ts + mat[i] = Series{Metric: s.Metric, Points: []Point{s.Point}} + } + return mat + } + // Add samples in output vector to output series. + for _, sample := range result { + h := sample.Metric.Hash() + ss, ok := seriess[h] + if !ok { + ss = Series{ + Metric: sample.Metric, + Points: getPointSlice(numSteps), + } + } + sample.Point.T = ts + ss.Points = append(ss.Points, sample.Point) + seriess[h] = ss + } + } + // Reuse the original point slices. + for _, m := range origMatrixes { + for _, s := range m { + putPointSlice(s.Points) + } + } + // Assemble the output matrix. + mat := make(Matrix, 0, len(seriess)) + for _, ss := range seriess { + mat = append(mat, ss) + } + return mat +} + +// eval evaluates the given expression as the given AST expression node requires. +func (ev *evaluator) eval(expr Expr) Value { + // This is the top-level evaluation method. + // Thus, we check for timeout/cancellation here. 
+ if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + ev.error(err) + } + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + switch e := expr.(type) { + case *AggregateExpr: + if s, ok := e.Param.(*StringLiteral); ok { + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh) + }, e.Expr) + } + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + var param float64 + if e.Param != nil { + param = v[0].(Vector)[0].V + } + return ev.aggregation(e.Op, e.Grouping, e.Without, param, v[1].(Vector), enh) + }, e.Param, e.Expr) + + case *Call: + if e.Func.Name == "timestamp" { + // Matrix evaluation always returns the evaluation time, + // so this function needs special handling when given + // a vector selector. + vs, ok := e.Args[0].(*VectorSelector) + if ok { + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return e.Func.Call([]Value{ev.vectorSelector(vs, enh.ts)}, e.Args, enh) + }) + } + } + // Check if the function has a matrix argument. + var matrixArgIndex int + var matrixArg bool + for i, a := range e.Args { + _, ok := a.(*MatrixSelector) + if ok { + matrixArgIndex = i + matrixArg = true + break + } + } + if !matrixArg { + // Does not have a matrix argument. + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return e.Func.Call(v, e.Args, enh) + }, e.Args...) + } + + inArgs := make([]Value, len(e.Args)) + // Evaluate any non-matrix arguments. + otherArgs := make([]Matrix, len(e.Args)) + otherInArgs := make([]Vector, len(e.Args)) + for i, e := range e.Args { + if i != matrixArgIndex { + otherArgs[i] = ev.eval(e).(Matrix) + otherInArgs[i] = Vector{Sample{}} + inArgs[i] = otherInArgs[i] + } + } + + sel := e.Args[matrixArgIndex].(*MatrixSelector) + mat := make(Matrix, 0, len(sel.series)) // Output matrix. + offset := durationMilliseconds(sel.Offset) + selRange := durationMilliseconds(sel.Range) + stepRange := selRange + if stepRange > ev.interval { + stepRange = ev.interval + } + // Reuse objects across steps to save memory allocations. + points := getPointSlice(16) + inMatrix := make(Matrix, 1) + inArgs[matrixArgIndex] = inMatrix + enh := &EvalNodeHelper{out: make(Vector, 0, 1)} + // Process all the calls for one time series at a time. + it := storage.NewBuffer(selRange) + for i, s := range sel.series { + points = points[:0] + it.Reset(s.Iterator()) + ss := Series{ + // For all range vector functions, the only change to the + // output labels is dropping the metric name so just do + // it once here. + Metric: dropMetricName(sel.series[i].Labels()), + Points: getPointSlice(numSteps), + } + inMatrix[0].Metric = sel.series[i].Labels() + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { + step++ + // Set the non-matrix arguments. + // They are scalar, so it is safe to use the step number + // when looking up the argument, as there will be no gaps. + for j := range e.Args { + if j != matrixArgIndex { + otherInArgs[j][0].V = otherArgs[j][0].Points[step].V + } + } + maxt := ts - offset + mint := maxt - selRange + // Evaluate the matrix selector for this series for this step. + points = ev.matrixIterSlice(it, mint, maxt, points) + if len(points) == 0 { + continue + } + inMatrix[0].Points = points + enh.ts = ts + // Make the function call. 
+ outVec := e.Func.Call(inArgs, e.Args, enh) + enh.out = outVec[:0] + if len(outVec) > 0 { + ss.Points = append(ss.Points, Point{V: outVec[0].Point.V, T: ts}) + } + // Only buffer stepRange milliseconds from the second step on. + it.ReduceDelta(stepRange) + } + if len(ss.Points) > 0 { + mat = append(mat, ss) + } + } + putPointSlice(points) + return mat + + case *ParenExpr: + return ev.eval(e.Expr) + + case *UnaryExpr: + mat := ev.eval(e.Expr).(Matrix) + if e.Op == itemSUB { + for i := range mat { + mat[i].Metric = dropMetricName(mat[i].Metric) + for j := range mat[i].Points { + mat[i].Points[j].V = -mat[i].Points[j].V + } + } + } + return mat + + case *BinaryExpr: + switch lt, rt := e.LHS.Type(), e.RHS.Type(); { + case lt == ValueTypeScalar && rt == ValueTypeScalar: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + val := scalarBinop(e.Op, v[0].(Vector)[0].Point.V, v[1].(Vector)[0].Point.V) + return append(enh.out, Sample{Point: Point{V: val}}) + }, e.LHS, e.RHS) + case lt == ValueTypeVector && rt == ValueTypeVector: + switch e.Op { + case itemLAND: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) + }, e.LHS, e.RHS) + case itemLOR: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) + }, e.LHS, e.RHS) + case itemLUnless: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, enh) + }, e.LHS, e.RHS) + default: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, enh) + }, e.LHS, e.RHS) + } + + case lt == ValueTypeVector && rt == ValueTypeScalar: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].Point.V}, false, e.ReturnBool, enh) + }, e.LHS, e.RHS) + + case lt == ValueTypeScalar && rt == ValueTypeVector: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].Point.V}, true, e.ReturnBool, enh) + }, e.LHS, e.RHS) + } + + case *NumberLiteral: + return ev.rangeEval(func(v []Value, enh *EvalNodeHelper) Vector { + return append(enh.out, Sample{Point: Point{V: e.Val}}) + }) + + case *VectorSelector: + mat := make(Matrix, 0, len(e.series)) + it := storage.NewBuffer(durationMilliseconds(LookbackDelta)) + for i, s := range e.series { + it.Reset(s.Iterator()) + ss := Series{ + Metric: e.series[i].Labels(), + Points: getPointSlice(numSteps), + } + + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + _, v, ok := ev.vectorSelectorSingle(it, e, ts) + if ok { + ss.Points = append(ss.Points, Point{V: v, T: ts}) + } + } + + if len(ss.Points) > 0 { + mat = append(mat, ss) + } + } + return mat + + case *MatrixSelector: + if ev.startTimestamp != ev.endTimestamp { + panic(fmt.Errorf("cannot do range evaluation of matrix selector")) + } + return ev.matrixSelector(e) + } + + panic(fmt.Errorf("unhandled expression of type: %T", expr)) +} + +// vectorSelector evaluates a *VectorSelector expression. 
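+// For each series it returns the most recent sample that is no older than
+// LookbackDelta, relative to the offset-adjusted evaluation timestamp.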
+func (ev *evaluator) vectorSelector(node *VectorSelector, ts int64) Vector {
+	var (
+		vec = make(Vector, 0, len(node.series))
+	)
+
+	it := storage.NewBuffer(durationMilliseconds(LookbackDelta))
+	for i, s := range node.series {
+		it.Reset(s.Iterator())
+
+		t, v, ok := ev.vectorSelectorSingle(it, node, ts)
+		if ok {
+			vec = append(vec, Sample{
+				Metric: node.series[i].Labels(),
+				Point:  Point{V: v, T: t},
+			})
+		}
+
+	}
+	return vec
+}
+
+// vectorSelectorSingle evaluates an instant vector for the iterator of one time series.
+func (ev *evaluator) vectorSelectorSingle(it *storage.BufferedSeriesIterator, node *VectorSelector, ts int64) (int64, float64, bool) {
+	refTime := ts - durationMilliseconds(node.Offset)
+	var t int64
+	var v float64
+
+	ok := it.Seek(refTime)
+	if !ok {
+		if it.Err() != nil {
+			ev.error(it.Err())
+		}
+	}
+
+	if ok {
+		t, v = it.Values()
+	}
+
+	if !ok || t > refTime {
+		t, v, ok = it.PeekBack(1)
+		if !ok || t < refTime-durationMilliseconds(LookbackDelta) {
+			return 0, 0, false
+		}
+	}
+	if value.IsStaleNaN(v) {
+		return 0, 0, false
+	}
+	return t, v, true
+}
+
+var pointPool = sync.Pool{}
+
+func getPointSlice(sz int) []Point {
+	p := pointPool.Get()
+	if p != nil {
+		return p.([]Point)
+	}
+	return make([]Point, 0, sz)
+}
+
+func putPointSlice(p []Point) {
+	pointPool.Put(p[:0])
+}
+
+// matrixSelector evaluates a *MatrixSelector expression.
+func (ev *evaluator) matrixSelector(node *MatrixSelector) Matrix {
+	var (
+		offset = durationMilliseconds(node.Offset)
+		maxt   = ev.startTimestamp - offset
+		mint   = maxt - durationMilliseconds(node.Range)
+		matrix = make(Matrix, 0, len(node.series))
+	)
+
+	it := storage.NewBuffer(durationMilliseconds(node.Range))
+	for i, s := range node.series {
+		if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
+			ev.error(err)
+		}
+		it.Reset(s.Iterator())
+		ss := Series{
+			Metric: node.series[i].Labels(),
+		}
+
+		ss.Points = ev.matrixIterSlice(it, mint, maxt, getPointSlice(16))
+
+		if len(ss.Points) > 0 {
+			matrix = append(matrix, ss)
+		} else {
+			putPointSlice(ss.Points)
+		}
+	}
+	return matrix
+}
+
+// matrixIterSlice populates a matrix vector covering the requested range for a
+// single time series, with points retrieved from an iterator.
+//
+// As an optimization, the matrix vector may already contain points of the same
+// time series from the evaluation of an earlier step (with lower mint and maxt
+// values). Any such points falling before mint are discarded; points that fall
+// into the [mint, maxt] range are retained; only points with later timestamps
+// are populated from the iterator.
+func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, maxt int64, out []Point) []Point {
+	if len(out) > 0 && out[len(out)-1].T >= mint {
+		// There is an overlap between previous and current ranges, retain common
+		// points. In most such cases:
+		//   (a) the overlap is significantly larger than the eval step; and/or
+		//   (b) the number of samples is relatively small.
+		// so a linear search will be as fast as a binary search.
+		var drop int
+		for drop = 0; out[drop].T < mint; drop++ {
+		}
+		copy(out, out[drop:])
+		out = out[:len(out)-drop]
+		// Only append points with timestamps after the last timestamp we have.
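+		// (out is guaranteed non-empty on this branch: its last point has
+		// T >= mint and is therefore never dropped.)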
+ mint = out[len(out)-1].T + 1 + } else { + out = out[:0] + } + + ok := it.Seek(maxt) + if !ok { + if it.Err() != nil { + ev.error(it.Err()) + } + } + + buf := it.Buffer() + for buf.Next() { + t, v := buf.At() + if value.IsStaleNaN(v) { + continue + } + // Values in the buffer are guaranteed to be smaller than maxt. + if t >= mint { + out = append(out, Point{T: t, V: v}) + } + } + // The seeked sample might also be in the range. + if ok { + t, v := it.Values() + if t == maxt && !value.IsStaleNaN(v) { + out = append(out, Point{T: t, V: v}) + } + } + return out +} + +func (ev *evaluator) VectorAnd(lhs, rhs Vector, matching *VectorMatching, enh *EvalNodeHelper) Vector { + if matching.Card != CardManyToMany { + panic("set operations must only use many-to-many matching") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + // The set of signatures for the right-hand side Vector. + rightSigs := map[uint64]struct{}{} + // Add all rhs samples to a map so we can easily find matches later. + for _, rs := range rhs { + rightSigs[sigf(rs.Metric)] = struct{}{} + } + + for _, ls := range lhs { + // If there's a matching entry in the right-hand side Vector, add the sample. + if _, ok := rightSigs[sigf(ls.Metric)]; ok { + enh.out = append(enh.out, ls) + } + } + return enh.out +} + +func (ev *evaluator) VectorOr(lhs, rhs Vector, matching *VectorMatching, enh *EvalNodeHelper) Vector { + if matching.Card != CardManyToMany { + panic("set operations must only use many-to-many matching") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + leftSigs := map[uint64]struct{}{} + // Add everything from the left-hand-side Vector. + for _, ls := range lhs { + leftSigs[sigf(ls.Metric)] = struct{}{} + enh.out = append(enh.out, ls) + } + // Add all right-hand side elements which have not been added from the left-hand side. + for _, rs := range rhs { + if _, ok := leftSigs[sigf(rs.Metric)]; !ok { + enh.out = append(enh.out, rs) + } + } + return enh.out +} + +func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *VectorMatching, enh *EvalNodeHelper) Vector { + if matching.Card != CardManyToMany { + panic("set operations must only use many-to-many matching") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + rightSigs := map[uint64]struct{}{} + for _, rs := range rhs { + rightSigs[sigf(rs.Metric)] = struct{}{} + } + + for _, ls := range lhs { + if _, ok := rightSigs[sigf(ls.Metric)]; !ok { + enh.out = append(enh.out, ls) + } + } + return enh.out +} + +// VectorBinop evaluates a binary operation between two Vectors, excluding set operators. +func (ev *evaluator) VectorBinop(op ItemType, lhs, rhs Vector, matching *VectorMatching, returnBool bool, enh *EvalNodeHelper) Vector { + if matching.Card == CardManyToMany { + panic("many-to-many only allowed for set operators") + } + sigf := enh.signatureFunc(matching.On, matching.MatchingLabels...) + + // The control flow below handles one-to-one or many-to-one matching. + // For one-to-many, swap sidedness and account for the swap when calculating + // values. + if matching.Card == CardOneToMany { + lhs, rhs = rhs, lhs + } + + // All samples from the rhs hashed by the matching label/values. + if enh.rightSigs == nil { + enh.rightSigs = make(map[uint64]Sample, len(enh.out)) + } else { + for k := range enh.rightSigs { + delete(enh.rightSigs, k) + } + } + rightSigs := enh.rightSigs + + // Add all rhs samples to a map so we can easily find matches later. 
+ for _, rs := range rhs { + sig := sigf(rs.Metric) + // The rhs is guaranteed to be the 'one' side. Having multiple samples + // with the same signature means that the matching is many-to-many. + if _, found := rightSigs[sig]; found { + // Many-to-many matching not allowed. + ev.errorf("many-to-many matching not allowed: matching labels must be unique on one side") + } + rightSigs[sig] = rs + } + + // Tracks the match-signature. For one-to-one operations the value is nil. For many-to-one + // the value is a set of signatures to detect duplicated result elements. + if enh.matchedSigs == nil { + enh.matchedSigs = make(map[uint64]map[uint64]struct{}, len(rightSigs)) + } else { + for k := range enh.matchedSigs { + delete(enh.matchedSigs, k) + } + } + matchedSigs := enh.matchedSigs + + // For all lhs samples find a respective rhs sample and perform + // the binary operation. + for _, ls := range lhs { + sig := sigf(ls.Metric) + + rs, found := rightSigs[sig] // Look for a match in the rhs Vector. + if !found { + continue + } + + // Account for potentially swapped sidedness. + vl, vr := ls.V, rs.V + if matching.Card == CardOneToMany { + vl, vr = vr, vl + } + value, keep := vectorElemBinop(op, vl, vr) + if returnBool { + if keep { + value = 1.0 + } else { + value = 0.0 + } + } else if !keep { + continue + } + metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) + + insertedSigs, exists := matchedSigs[sig] + if matching.Card == CardOneToOne { + if exists { + ev.errorf("multiple matches for labels: many-to-one matching must be explicit (group_left/group_right)") + } + matchedSigs[sig] = nil // Set existence to true. + } else { + // In many-to-one matching the grouping labels have to ensure a unique metric + // for the result Vector. Check whether those labels have already been added for + // the same matching labels. + insertSig := metric.Hash() + + if !exists { + insertedSigs = map[uint64]struct{}{} + matchedSigs[sig] = insertedSigs + } else if _, duplicate := insertedSigs[insertSig]; duplicate { + ev.errorf("multiple matches for labels: grouping labels must ensure unique matches") + } + insertedSigs[insertSig] = struct{}{} + } + + enh.out = append(enh.out, Sample{ + Metric: metric, + Point: Point{V: value}, + }) + } + return enh.out +} + +// signatureFunc returns a function that calculates the signature for a metric +// ignoring the provided labels. If on, then the given labels are only used instead. +func signatureFunc(on bool, names ...string) func(labels.Labels) uint64 { + sort.Strings(names) + if on { + return func(lset labels.Labels) uint64 { + h, _ := lset.HashForLabels(make([]byte, 0, 1024), names...) + return h + } + } + return func(lset labels.Labels) uint64 { + h, _ := lset.HashWithoutLabels(make([]byte, 0, 1024), names...) + return h + } +} + +// resultMetric returns the metric for the given sample(s) based on the Vector +// binary operation and the matching options. +func resultMetric(lhs, rhs labels.Labels, op ItemType, matching *VectorMatching, enh *EvalNodeHelper) labels.Labels { + if enh.resultMetric == nil { + enh.resultMetric = make(map[uint64]labels.Labels, len(enh.out)) + } + // op and matching are always the same for a given node, so + // there's no need to include them in the hash key. + // If the lhs and rhs are the same then the xor would be 0, + // so add in one side to protect against that. 
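+	// (Adding lh also makes the key order-sensitive: (lh^rh)+lh differs
+	// from (rh^lh)+rh whenever the two hashes differ.)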
+ lh := lhs.Hash() + h := (lh ^ rhs.Hash()) + lh + if ret, ok := enh.resultMetric[h]; ok { + return ret + } + + lb := labels.NewBuilder(lhs) + + if shouldDropMetricName(op) { + lb.Del(labels.MetricName) + } + + if matching.Card == CardOneToOne { + if matching.On { + Outer: + for _, l := range lhs { + for _, n := range matching.MatchingLabels { + if l.Name == n { + continue Outer + } + } + lb.Del(l.Name) + } + } else { + lb.Del(matching.MatchingLabels...) + } + } + for _, ln := range matching.Include { + // Included labels from the `group_x` modifier are taken from the "one"-side. + if v := rhs.Get(ln); v != "" { + lb.Set(ln, v) + } else { + lb.Del(ln) + } + } + + ret := lb.Labels() + enh.resultMetric[h] = ret + return ret +} + +// VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. +func (ev *evaluator) VectorscalarBinop(op ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) Vector { + for _, lhsSample := range lhs { + lv, rv := lhsSample.V, rhs.V + // lhs always contains the Vector. If the original position was different + // swap for calculating the value. + if swap { + lv, rv = rv, lv + } + value, keep := vectorElemBinop(op, lv, rv) + if returnBool { + if keep { + value = 1.0 + } else { + value = 0.0 + } + keep = true + } + if keep { + lhsSample.V = value + if shouldDropMetricName(op) || returnBool { + lhsSample.Metric = enh.dropMetricName(lhsSample.Metric) + } + enh.out = append(enh.out, lhsSample) + } + } + return enh.out +} + +func dropMetricName(l labels.Labels) labels.Labels { + return labels.NewBuilder(l).Del(labels.MetricName).Labels() +} + +// scalarBinop evaluates a binary operation between two Scalars. +func scalarBinop(op ItemType, lhs, rhs float64) float64 { + switch op { + case itemADD: + return lhs + rhs + case itemSUB: + return lhs - rhs + case itemMUL: + return lhs * rhs + case itemDIV: + return lhs / rhs + case itemPOW: + return math.Pow(lhs, rhs) + case itemMOD: + return math.Mod(lhs, rhs) + case itemEQL: + return btos(lhs == rhs) + case itemNEQ: + return btos(lhs != rhs) + case itemGTR: + return btos(lhs > rhs) + case itemLSS: + return btos(lhs < rhs) + case itemGTE: + return btos(lhs >= rhs) + case itemLTE: + return btos(lhs <= rhs) + } + panic(fmt.Errorf("operator %q not allowed for Scalar operations", op)) +} + +// vectorElemBinop evaluates a binary operation between two Vector elements. +func vectorElemBinop(op ItemType, lhs, rhs float64) (float64, bool) { + switch op { + case itemADD: + return lhs + rhs, true + case itemSUB: + return lhs - rhs, true + case itemMUL: + return lhs * rhs, true + case itemDIV: + return lhs / rhs, true + case itemPOW: + return math.Pow(lhs, rhs), true + case itemMOD: + return math.Mod(lhs, rhs), true + case itemEQL: + return lhs, lhs == rhs + case itemNEQ: + return lhs, lhs != rhs + case itemGTR: + return lhs, lhs > rhs + case itemLSS: + return lhs, lhs < rhs + case itemGTE: + return lhs, lhs >= rhs + case itemLTE: + return lhs, lhs <= rhs + } + panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) +} + +// intersection returns the metric of common label/value pairs of two input metrics. 
+func intersection(ls1, ls2 labels.Labels) labels.Labels { + res := make(labels.Labels, 0, 5) + + for _, l1 := range ls1 { + for _, l2 := range ls2 { + if l1.Name == l2.Name && l1.Value == l2.Value { + res = append(res, l1) + continue + } + } + } + return res +} + +type groupedAggregation struct { + labels labels.Labels + value float64 + valuesSquaredSum float64 + groupCount int + heap vectorByValueHeap + reverseHeap vectorByReverseValueHeap +} + +// aggregation evaluates an aggregation operation on a Vector. +func (ev *evaluator) aggregation(op ItemType, grouping []string, without bool, param interface{}, vec Vector, enh *EvalNodeHelper) Vector { + + result := map[uint64]*groupedAggregation{} + var k int64 + if op == itemTopK || op == itemBottomK { + f := param.(float64) + if !convertibleToInt64(f) { + ev.errorf("Scalar value %v overflows int64", f) + } + k = int64(f) + if k < 1 { + return Vector{} + } + } + var q float64 + if op == itemQuantile { + q = param.(float64) + } + var valueLabel string + if op == itemCountValues { + valueLabel = param.(string) + if !without { + grouping = append(grouping, valueLabel) + } + } + + sort.Strings(grouping) + buf := make([]byte, 0, 1024) + for _, s := range vec { + metric := s.Metric + + if op == itemCountValues { + lb := labels.NewBuilder(metric) + lb.Set(valueLabel, strconv.FormatFloat(s.V, 'f', -1, 64)) + metric = lb.Labels() + } + + var ( + groupingKey uint64 + ) + if without { + groupingKey, buf = metric.HashWithoutLabels(buf, grouping...) + } else { + groupingKey, buf = metric.HashForLabels(buf, grouping...) + } + + group, ok := result[groupingKey] + // Add a new group if it doesn't exist. + if !ok { + var m labels.Labels + + if without { + lb := labels.NewBuilder(metric) + lb.Del(grouping...) + lb.Del(labels.MetricName) + m = lb.Labels() + } else { + m = make(labels.Labels, 0, len(grouping)) + for _, l := range metric { + for _, n := range grouping { + if l.Name == n { + m = append(m, l) + break + } + } + } + sort.Sort(m) + } + result[groupingKey] = &groupedAggregation{ + labels: m, + value: s.V, + valuesSquaredSum: s.V * s.V, + groupCount: 1, + } + inputVecLen := int64(len(vec)) + resultSize := k + if k > inputVecLen { + resultSize = inputVecLen + } + if op == itemTopK || op == itemQuantile { + result[groupingKey].heap = make(vectorByValueHeap, 0, resultSize) + heap.Push(&result[groupingKey].heap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } else if op == itemBottomK { + result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 0, resultSize) + heap.Push(&result[groupingKey].reverseHeap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } + continue + } + + switch op { + case itemSum: + group.value += s.V + + case itemAvg: + group.value += s.V + group.groupCount++ + + case itemMax: + if group.value < s.V || math.IsNaN(group.value) { + group.value = s.V + } + + case itemMin: + if group.value > s.V || math.IsNaN(group.value) { + group.value = s.V + } + + case itemCount, itemCountValues: + group.groupCount++ + + case itemStdvar, itemStddev: + group.value += s.V + group.valuesSquaredSum += s.V * s.V + group.groupCount++ + + case itemTopK: + if int64(len(group.heap)) < k || group.heap[0].V < s.V || math.IsNaN(group.heap[0].V) { + if int64(len(group.heap)) == k { + heap.Pop(&group.heap) + } + heap.Push(&group.heap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } + + case itemBottomK: + if int64(len(group.reverseHeap)) < k || group.reverseHeap[0].V > s.V || math.IsNaN(group.reverseHeap[0].V) { + if 
int64(len(group.reverseHeap)) == k { + heap.Pop(&group.reverseHeap) + } + heap.Push(&group.reverseHeap, &Sample{ + Point: Point{V: s.V}, + Metric: s.Metric, + }) + } + + case itemQuantile: + group.heap = append(group.heap, s) + + default: + panic(fmt.Errorf("expected aggregation operator but got %q", op)) + } + } + + // Construct the result Vector from the aggregated groups. + for _, aggr := range result { + switch op { + case itemAvg: + aggr.value = aggr.value / float64(aggr.groupCount) + + case itemCount, itemCountValues: + aggr.value = float64(aggr.groupCount) + + case itemStdvar: + avg := aggr.value / float64(aggr.groupCount) + aggr.value = aggr.valuesSquaredSum/float64(aggr.groupCount) - avg*avg + + case itemStddev: + avg := aggr.value / float64(aggr.groupCount) + aggr.value = math.Sqrt(aggr.valuesSquaredSum/float64(aggr.groupCount) - avg*avg) + + case itemTopK: + // The heap keeps the lowest value on top, so reverse it. + sort.Sort(sort.Reverse(aggr.heap)) + for _, v := range aggr.heap { + enh.out = append(enh.out, Sample{ + Metric: v.Metric, + Point: Point{V: v.V}, + }) + } + continue // Bypass default append. + + case itemBottomK: + // The heap keeps the lowest value on top, so reverse it. + sort.Sort(sort.Reverse(aggr.reverseHeap)) + for _, v := range aggr.reverseHeap { + enh.out = append(enh.out, Sample{ + Metric: v.Metric, + Point: Point{V: v.V}, + }) + } + continue // Bypass default append. + + case itemQuantile: + aggr.value = quantile(q, aggr.heap) + + default: + // For other aggregations, we already have the right value. + } + + enh.out = append(enh.out, Sample{ + Metric: aggr.labels, + Point: Point{V: aggr.value}, + }) + } + return enh.out +} + +// btos returns 1 if b is true, 0 otherwise. +func btos(b bool) float64 { + if b { + return 1 + } + return 0 +} + +// shouldDropMetricName returns whether the metric name should be dropped in the +// result of the op operation. +func shouldDropMetricName(op ItemType) bool { + switch op { + case itemADD, itemSUB, itemDIV, itemMUL, itemMOD: + return true + default: + return false + } +} + +// LookbackDelta determines the time since the last sample after which a time +// series is considered stale. +var LookbackDelta = 5 * time.Minute + +// A queryGate controls the maximum number of concurrently running and waiting queries. +type queryGate struct { + ch chan struct{} +} + +// newQueryGate returns a query gate that limits the number of queries +// being concurrently executed. +func newQueryGate(length int) *queryGate { + return &queryGate{ + ch: make(chan struct{}, length), + } +} + +// Start blocks until the gate has a free spot or the context is done. +func (g *queryGate) Start(ctx context.Context) error { + select { + case <-ctx.Done(): + return contextDone(ctx, "query queue") + case g.ch <- struct{}{}: + return nil + } +} + +// Done releases a single spot in the gate. +func (g *queryGate) Done() { + select { + case <-g.ch: + default: + panic("engine.queryGate.Done: more operations done than started") + } +} + +// documentedType returns the internal type to the equivalent +// user facing terminology as defined in the documentation. 
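The queryGate above is the engine's concurrency limiter; Engine.exec brackets every evaluation with Start and Done. A usage sketch (hypothetical wrapper, not part of the vendored file); documentedType itself follows:

```go
func withGate(ctx context.Context, g *queryGate, run func() error) error {
	if err := g.Start(ctx); err != nil {
		return err // canceled or timed out while waiting in the queue
	}
	defer g.Done() // free the slot even if run returns an error
	return run()
}
```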
+func documentedType(t ValueType) string {
+	switch t {
+	case "vector":
+		return "instant vector"
+	case "matrix":
+		return "range vector"
+	default:
+		return string(t)
+	}
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go
new file mode 100644
index 000000000000..c40c5eae486b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/functions.go
@@ -0,0 +1,1269 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+)
+
+// Function represents a function of the expression language and is
+// used by function nodes.
+type Function struct {
+	Name       string
+	ArgTypes   []ValueType
+	Variadic   int
+	ReturnType ValueType
+
+	// vals is a list of the evaluated arguments for the function call.
+	// For range vectors it will be a Matrix with one series, instant vectors a
+	// Vector, scalars a Vector with one series whose value is the scalar
+	// value, and nil for strings.
+	// args are the original arguments to the function, where you can access
+	// matrixSelectors, vectorSelectors, and StringLiterals.
+	// enh.out is a pre-allocated empty vector that you may use to accumulate
+	// output before returning it. The vectors in vals should not be returned.
+	// Range vector functions need only return a vector with the right values;
+	// the metric and timestamp are not needed.
+	// Instant vector functions need only return a vector with the right values and
+	// metrics; the timestamps are not needed.
+	// Scalar results should be returned as the value of a sample in a Vector.
+	Call func(vals []Value, args Expressions, enh *EvalNodeHelper) Vector
+}
+
+// === time() float64 ===
+func funcTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return Vector{Sample{Point: Point{
+		V: float64(enh.ts) / 1000,
+	}}}
+}
+
+// extrapolatedRate is a utility function for rate/increase/delta.
+// It calculates the rate (allowing for counter resets if isCounter is true),
+// extrapolates if the first/last sample is close to the boundary, and returns
+// the result as either per-second (if isRate is true) or overall.
+func extrapolatedRate(vals []Value, args Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector {
+	ms := args[0].(*MatrixSelector)
+
+	var (
+		matrix     = vals[0].(Matrix)
+		rangeStart = enh.ts - durationMilliseconds(ms.Range+ms.Offset)
+		rangeEnd   = enh.ts - durationMilliseconds(ms.Offset)
+	)
+
+	for _, samples := range matrix {
+		// No sense in trying to compute a rate without at least two points. Drop
+		// this Vector element.
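+		// (Where there are enough points, the delta computed below is
+		// extrapolated toward the full window: e.g. samples spanning 58s of
+		// a 60s range scale the result by roughly 60/58, bounded by half an
+		// average sample interval at each end.)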
+		if len(samples.Points) < 2 {
+			continue
+		}
+		var (
+			counterCorrection float64
+			lastValue         float64
+		)
+		for _, sample := range samples.Points {
+			if isCounter && sample.V < lastValue {
+				counterCorrection += lastValue
+			}
+			lastValue = sample.V
+		}
+		resultValue := lastValue - samples.Points[0].V + counterCorrection
+
+		// Duration between first/last samples and boundary of range.
+		durationToStart := float64(samples.Points[0].T-rangeStart) / 1000
+		durationToEnd := float64(rangeEnd-samples.Points[len(samples.Points)-1].T) / 1000
+
+		sampledInterval := float64(samples.Points[len(samples.Points)-1].T-samples.Points[0].T) / 1000
+		averageDurationBetweenSamples := sampledInterval / float64(len(samples.Points)-1)
+
+		if isCounter && resultValue > 0 && samples.Points[0].V >= 0 {
+			// Counters cannot be negative. If we have any slope at
+			// all (i.e. resultValue went up), we can extrapolate
+			// the zero point of the counter. If the duration to the
+			// zero point is shorter than the durationToStart, we
+			// take the zero point as the start of the series,
+			// thereby avoiding extrapolation to negative counter
+			// values.
+			durationToZero := sampledInterval * (samples.Points[0].V / resultValue)
+			if durationToZero < durationToStart {
+				durationToStart = durationToZero
+			}
+		}
+
+		// If the first/last samples are close to the boundaries of the range,
+		// extrapolate the result. This is as we expect that another sample
+		// will exist given the spacing between samples we've seen thus far,
+		// with an allowance for noise.
+		extrapolationThreshold := averageDurationBetweenSamples * 1.1
+		extrapolateToInterval := sampledInterval
+
+		if durationToStart < extrapolationThreshold {
+			extrapolateToInterval += durationToStart
+		} else {
+			extrapolateToInterval += averageDurationBetweenSamples / 2
+		}
+		if durationToEnd < extrapolationThreshold {
+			extrapolateToInterval += durationToEnd
+		} else {
+			extrapolateToInterval += averageDurationBetweenSamples / 2
+		}
+		resultValue = resultValue * (extrapolateToInterval / sampledInterval)
+		if isRate {
+			resultValue = resultValue / ms.Range.Seconds()
+		}
+
+		enh.out = append(enh.out, Sample{
+			Point: Point{V: resultValue},
+		})
+	}
+	return enh.out
+}
+
+// === delta(Matrix ValueTypeMatrix) Vector ===
+func funcDelta(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return extrapolatedRate(vals, args, enh, false, false)
+}
+
+// === rate(node ValueTypeMatrix) Vector ===
+func funcRate(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return extrapolatedRate(vals, args, enh, true, true)
+}
+
+// === increase(node ValueTypeMatrix) Vector ===
+func funcIncrease(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return extrapolatedRate(vals, args, enh, true, false)
+}
+
+// === irate(node ValueTypeMatrix) Vector ===
+func funcIrate(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return instantValue(vals, enh.out, true)
+}
+
+// === idelta(node ValueTypeMatrix) Vector ===
+func funcIdelta(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return instantValue(vals, enh.out, false)
+}
+
+func instantValue(vals []Value, out Vector, isRate bool) Vector {
+	for _, samples := range vals[0].(Matrix) {
+		// No sense in trying to compute a rate without at least two points. Drop
+		// this Vector element.
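+		// (In contrast to extrapolatedRate, only the last two samples in
+		// the window are used, so irate/idelta reflect the most recent
+		// change rather than a window-wide average.)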
+		if len(samples.Points) < 2 {
+			continue
+		}
+
+		lastSample := samples.Points[len(samples.Points)-1]
+		previousSample := samples.Points[len(samples.Points)-2]
+
+		var resultValue float64
+		if isRate && lastSample.V < previousSample.V {
+			// Counter reset.
+			resultValue = lastSample.V
+		} else {
+			resultValue = lastSample.V - previousSample.V
+		}
+
+		sampledInterval := lastSample.T - previousSample.T
+		if sampledInterval == 0 {
+			// Avoid dividing by 0.
+			continue
+		}
+
+		if isRate {
+			// Convert to per-second.
+			resultValue /= float64(sampledInterval) / 1000
+		}
+
+		out = append(out, Sample{
+			Point: Point{V: resultValue},
+		})
+	}
+	return out
+}
+
+// calcTrendValue calculates the trend value at the given index i.
+// This is somewhat analogous to the slope of the trend at the given index.
+// sf is the smoothing factor, tf the trend factor, s0 and s1 the previous and
+// current smoothed values, and b the previous trend value.
+func calcTrendValue(i int, sf, tf, s0, s1, b float64) float64 {
+	if i == 0 {
+		return b
+	}
+
+	x := tf * (s1 - s0)
+	y := (1 - tf) * b
+
+	return x + y
+}
+
+// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data.
+// Holt-Winters also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current
+// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects
+// how trends in historical data will affect the current data. A higher trend factor increases the influence
+// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing, section "Double exponential smoothing".
+func funcHoltWinters(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	mat := vals[0].(Matrix)
+
+	// The smoothing factor argument.
+	sf := vals[1].(Vector)[0].V
+
+	// The trend factor argument.
+	tf := vals[2].(Vector)[0].V
+
+	// Sanity check the input.
+	if sf <= 0 || sf >= 1 {
+		panic(fmt.Errorf("invalid smoothing factor. Expected: 0 < sf < 1, got: %f", sf))
+	}
+	if tf <= 0 || tf >= 1 {
+		panic(fmt.Errorf("invalid trend factor. Expected: 0 < tf < 1, got: %f", tf))
+	}
+
+	var l int
+	for _, samples := range mat {
+		l = len(samples.Points)
+
+		// Can't do the smoothing operation with less than two points.
+		if l < 2 {
+			continue
+		}
+
+		var s0, s1, b float64
+		// Set initial values.
+		s1 = samples.Points[0].V
+		b = samples.Points[1].V - samples.Points[0].V
+
+		// Run the smoothing operation.
+		var x, y float64
+		for i := 1; i < l; i++ {
+
+			// Scale the raw value against the smoothing factor.
+			x = sf * samples.Points[i].V
+
+			// Scale the last smoothed value with the trend at this point.
+			b = calcTrendValue(i-1, sf, tf, s0, s1, b)
+			y = (1 - sf) * (s1 + b)
+
+			s0, s1 = s1, x+y
+		}
+
+		enh.out = append(enh.out, Sample{
+			Point: Point{V: s1},
+		})
+	}
+
+	return enh.out
+}
+
+// === sort(node ValueTypeVector) Vector ===
+func funcSort(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	// NaN should sort to the bottom, so take descending sort with NaN first and
+	// reverse it.
+ byValueSorter := vectorByReverseValueHeap(vals[0].(Vector)) + sort.Sort(sort.Reverse(byValueSorter)) + return Vector(byValueSorter) +} + +// === sortDesc(node ValueTypeVector) Vector === +func funcSortDesc(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + // NaN should sort to the bottom, so take ascending sort with NaN first and + // reverse it. + byValueSorter := vectorByValueHeap(vals[0].(Vector)) + sort.Sort(sort.Reverse(byValueSorter)) + return Vector(byValueSorter) +} + +// === clamp_max(Vector ValueTypeVector, max Scalar) Vector === +func funcClampMax(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + vec := vals[0].(Vector) + max := vals[1].(Vector)[0].Point.V + for _, el := range vec { + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: math.Min(max, el.V)}, + }) + } + return enh.out +} + +// === clamp_min(Vector ValueTypeVector, min Scalar) Vector === +func funcClampMin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + vec := vals[0].(Vector) + min := vals[1].(Vector)[0].Point.V + for _, el := range vec { + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: math.Max(min, el.V)}, + }) + } + return enh.out +} + +// === round(Vector ValueTypeVector, toNearest=1 Scalar) Vector === +func funcRound(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + vec := vals[0].(Vector) + // round returns a number rounded to toNearest. + // Ties are solved by rounding up. + toNearest := float64(1) + if len(args) >= 2 { + toNearest = vals[1].(Vector)[0].Point.V + } + // Invert as it seems to cause fewer floating point accuracy issues. + toNearestInverse := 1.0 / toNearest + + for _, el := range vec { + v := math.Floor(el.V*toNearestInverse+0.5) / toNearestInverse + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: v}, + }) + } + return enh.out +} + +// === Scalar(node ValueTypeVector) Scalar === +func funcScalar(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + v := vals[0].(Vector) + if len(v) != 1 { + return append(enh.out, Sample{ + Point: Point{V: math.NaN()}, + }) + } + return append(enh.out, Sample{ + Point: Point{V: v[0].V}, + }) +} + +func aggrOverTime(vals []Value, enh *EvalNodeHelper, aggrFn func([]Point) float64) Vector { + mat := vals[0].(Matrix) + + for _, el := range mat { + if len(el.Points) == 0 { + continue + } + + enh.out = append(enh.out, Sample{ + Point: Point{V: aggrFn(el.Points)}, + }) + } + return enh.out +} + +// === avg_over_time(Matrix ValueTypeMatrix) Vector === +func funcAvgOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + var sum float64 + for _, v := range values { + sum += v.V + } + return sum / float64(len(values)) + }) +} + +// === count_over_time(Matrix ValueTypeMatrix) Vector === +func funcCountOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + return float64(len(values)) + }) +} + +// === floor(Vector ValueTypeVector) Vector === +// === max_over_time(Matrix ValueTypeMatrix) Vector === +func funcMaxOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return aggrOverTime(vals, enh, func(values []Point) float64 { + max := math.Inf(-1) + for _, v := range values { + max = math.Max(max, v.V) + } + return max + }) +} + +// === min_over_time(Matrix ValueTypeMatrix) Vector === +func 
+func funcMinOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return aggrOverTime(vals, enh, func(values []Point) float64 {
+		min := math.Inf(1)
+		for _, v := range values {
+			min = math.Min(min, v.V)
+		}
+		return min
+	})
+}
+
+// === sum_over_time(Matrix ValueTypeMatrix) Vector ===
+func funcSumOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return aggrOverTime(vals, enh, func(values []Point) float64 {
+		var sum float64
+		for _, v := range values {
+			sum += v.V
+		}
+		return sum
+	})
+}
+
+// === quantile_over_time(q ValueTypeScalar, Matrix ValueTypeMatrix) Vector ===
+func funcQuantileOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	q := vals[0].(Vector)[0].V
+	mat := vals[1].(Matrix)
+
+	for _, el := range mat {
+		if len(el.Points) == 0 {
+			continue
+		}
+
+		values := make(vectorByValueHeap, 0, len(el.Points))
+		for _, v := range el.Points {
+			values = append(values, Sample{Point: Point{V: v.V}})
+		}
+		enh.out = append(enh.out, Sample{
+			Point: Point{V: quantile(q, values)},
+		})
+	}
+	return enh.out
+}
+
+// === stddev_over_time(Matrix ValueTypeMatrix) Vector ===
+func funcStddevOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return aggrOverTime(vals, enh, func(values []Point) float64 {
+		var sum, squaredSum, count float64
+		for _, v := range values {
+			sum += v.V
+			squaredSum += v.V * v.V
+			count++
+		}
+		avg := sum / count
+		return math.Sqrt(squaredSum/count - avg*avg)
+	})
+}
+
+// === stdvar_over_time(Matrix ValueTypeMatrix) Vector ===
+func funcStdvarOverTime(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return aggrOverTime(vals, enh, func(values []Point) float64 {
+		var sum, squaredSum, count float64
+		for _, v := range values {
+			sum += v.V
+			squaredSum += v.V * v.V
+			count++
+		}
+		avg := sum / count
+		return squaredSum/count - avg*avg
+	})
+}
+
+// === absent(Vector ValueTypeVector) Vector ===
+func funcAbsent(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	if len(vals[0].(Vector)) > 0 {
+		return enh.out
+	}
+	m := []labels.Label{}
+
+	if vs, ok := args[0].(*VectorSelector); ok {
+		for _, ma := range vs.LabelMatchers {
+			if ma.Type == labels.MatchEqual && ma.Name != labels.MetricName {
+				m = append(m, labels.Label{Name: ma.Name, Value: ma.Value})
+			}
+		}
+	}
+	return append(enh.out,
+		Sample{
+			Metric: labels.New(m...),
+			Point:  Point{V: 1},
+		})
+}
+
+func simpleFunc(vals []Value, enh *EvalNodeHelper, f func(float64) float64) Vector {
+	for _, el := range vals[0].(Vector) {
+		enh.out = append(enh.out, Sample{
+			Metric: enh.dropMetricName(el.Metric),
+			Point:  Point{V: f(el.V)},
+		})
+	}
+	return enh.out
+}
+
+// === abs(Vector ValueTypeVector) Vector ===
+func funcAbs(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return simpleFunc(vals, enh, math.Abs)
+}
+
+// === ceil(Vector ValueTypeVector) Vector ===
+func funcCeil(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return simpleFunc(vals, enh, math.Ceil)
+}
+
+// === floor(Vector ValueTypeVector) Vector ===
+func funcFloor(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return simpleFunc(vals, enh, math.Floor)
+}
+
+// === exp(Vector ValueTypeVector) Vector ===
+func funcExp(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return simpleFunc(vals, enh, math.Exp)
+}
+
+// === sqrt(Vector ValueTypeVector) Vector ===
+func funcSqrt(vals []Value, args Expressions, enh *EvalNodeHelper) Vector {
+	return simpleFunc(vals, enh, math.Sqrt)
+}
+
+// === ln(Vector
ValueTypeVector) Vector === +func funcLn(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Log) +} + +// === log2(Vector ValueTypeVector) Vector === +func funcLog2(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Log2) +} + +// === log10(Vector ValueTypeVector) Vector === +func funcLog10(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return simpleFunc(vals, enh, math.Log10) +} + +// === timestamp(Vector ValueTypeVector) Vector === +func funcTimestamp(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + vec := vals[0].(Vector) + for _, el := range vec { + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: float64(el.T) / 1000}, + }) + } + return enh.out +} + +// linearRegression performs a least-square linear regression analysis on the +// provided SamplePairs. It returns the slope, and the intercept value at the +// provided time. +func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) { + var ( + n float64 + sumX, sumY float64 + sumXY, sumX2 float64 + ) + for _, sample := range samples { + x := float64(sample.T-interceptTime) / 1e3 + n += 1.0 + sumY += sample.V + sumX += x + sumXY += x * sample.V + sumX2 += x * x + } + covXY := sumXY - sumX*sumY/n + varX := sumX2 - sumX*sumX/n + + slope = covXY / varX + intercept = sumY/n - slope*sumX/n + return slope, intercept +} + +// === deriv(node ValueTypeMatrix) Vector === +func funcDeriv(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + mat := vals[0].(Matrix) + + for _, samples := range mat { + // No sense in trying to compute a derivative without at least two points. + // Drop this Vector element. + if len(samples.Points) < 2 { + continue + } + + // We pass in an arbitrary timestamp that is near the values in use + // to avoid floating point accuracy issues, see + // https://github.com/prometheus/prometheus/issues/2674 + slope, _ := linearRegression(samples.Points, samples.Points[0].T) + enh.out = append(enh.out, Sample{ + Point: Point{V: slope}, + }) + } + return enh.out +} + +// === predict_linear(node ValueTypeMatrix, k ValueTypeScalar) Vector === +func funcPredictLinear(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + mat := vals[0].(Matrix) + duration := vals[1].(Vector)[0].V + + for _, samples := range mat { + // No sense in trying to predict anything without at least two points. + // Drop this Vector element. + if len(samples.Points) < 2 { + continue + } + slope, intercept := linearRegression(samples.Points, enh.ts) + + enh.out = append(enh.out, Sample{ + Point: Point{V: slope*duration + intercept}, + }) + } + return enh.out +} + +// === histogram_quantile(k ValueTypeScalar, Vector ValueTypeVector) Vector === +func funcHistogramQuantile(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + q := vals[0].(Vector)[0].V + inVec := vals[1].(Vector) + sigf := enh.signatureFunc(false, excludedLabels...) + + if enh.signatureToMetricWithBuckets == nil { + enh.signatureToMetricWithBuckets = map[uint64]*metricWithBuckets{} + } else { + for _, v := range enh.signatureToMetricWithBuckets { + v.buckets = v.buckets[:0] + } + } + for _, el := range inVec { + upperBound, err := strconv.ParseFloat( + el.Metric.Get(model.BucketLabel), 64, + ) + if err != nil { + // Oops, no bucket label or malformed label value. Skip. + // TODO(beorn7): Issue a warning somehow. 
+ continue + } + hash := sigf(el.Metric) + + mb, ok := enh.signatureToMetricWithBuckets[hash] + if !ok { + el.Metric = labels.NewBuilder(el.Metric). + Del(labels.BucketLabel, labels.MetricName). + Labels() + + mb = &metricWithBuckets{el.Metric, nil} + enh.signatureToMetricWithBuckets[hash] = mb + } + mb.buckets = append(mb.buckets, bucket{upperBound, el.V}) + } + + for _, mb := range enh.signatureToMetricWithBuckets { + if len(mb.buckets) > 0 { + enh.out = append(enh.out, Sample{ + Metric: mb.metric, + Point: Point{V: bucketQuantile(q, mb.buckets)}, + }) + } + } + + return enh.out +} + +// === resets(Matrix ValueTypeMatrix) Vector === +func funcResets(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + in := vals[0].(Matrix) + + for _, samples := range in { + resets := 0 + prev := samples.Points[0].V + for _, sample := range samples.Points[1:] { + current := sample.V + if current < prev { + resets++ + } + prev = current + } + + enh.out = append(enh.out, Sample{ + Point: Point{V: float64(resets)}, + }) + } + return enh.out +} + +// === changes(Matrix ValueTypeMatrix) Vector === +func funcChanges(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + in := vals[0].(Matrix) + + for _, samples := range in { + changes := 0 + prev := samples.Points[0].V + for _, sample := range samples.Points[1:] { + current := sample.V + if current != prev && !(math.IsNaN(current) && math.IsNaN(prev)) { + changes++ + } + prev = current + } + + enh.out = append(enh.out, Sample{ + Point: Point{V: float64(changes)}, + }) + } + return enh.out +} + +// === label_replace(Vector ValueTypeVector, dst_label, replacement, src_labelname, regex ValueTypeString) Vector === +func funcLabelReplace(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + var ( + vector = vals[0].(Vector) + dst = args[1].(*StringLiteral).Val + repl = args[2].(*StringLiteral).Val + src = args[3].(*StringLiteral).Val + regexStr = args[4].(*StringLiteral).Val + ) + + if enh.regex == nil { + var err error + enh.regex, err = regexp.Compile("^(?:" + regexStr + ")$") + if err != nil { + panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) + } + if !model.LabelNameRE.MatchString(dst) { + panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) + } + enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) + } + + outSet := make(map[uint64]struct{}, len(vector)) + for _, el := range vector { + h := el.Metric.Hash() + var outMetric labels.Labels + if l, ok := enh.dmn[h]; ok { + outMetric = l + } else { + srcVal := el.Metric.Get(src) + indexes := enh.regex.FindStringSubmatchIndex(srcVal) + if indexes == nil { + // If there is no match, no replacement should take place. 
+ outMetric = el.Metric + enh.dmn[h] = outMetric + } else { + res := enh.regex.ExpandString([]byte{}, repl, srcVal, indexes) + + lb := labels.NewBuilder(el.Metric).Del(dst) + if len(res) > 0 { + lb.Set(dst, string(res)) + } + outMetric = lb.Labels() + enh.dmn[h] = outMetric + } + } + + outHash := outMetric.Hash() + if _, ok := outSet[outHash]; ok { + panic(fmt.Errorf("duplicated label set in output of label_replace(): %s", el.Metric)) + } else { + enh.out = append(enh.out, + Sample{ + Metric: outMetric, + Point: Point{V: el.Point.V}, + }) + outSet[outHash] = struct{}{} + } + } + return enh.out +} + +// === Vector(s Scalar) Vector === +func funcVector(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return append(enh.out, + Sample{ + Metric: labels.Labels{}, + Point: Point{V: vals[0].(Vector)[0].V}, + }) +} + +// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) Vector === +func funcLabelJoin(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + var ( + vector = vals[0].(Vector) + dst = args[1].(*StringLiteral).Val + sep = args[2].(*StringLiteral).Val + srcLabels = make([]string, len(args)-3) + ) + + if enh.dmn == nil { + enh.dmn = make(map[uint64]labels.Labels, len(enh.out)) + } + + for i := 3; i < len(args); i++ { + src := args[i].(*StringLiteral).Val + if !model.LabelName(src).IsValid() { + panic(fmt.Errorf("invalid source label name in label_join(): %s", src)) + } + srcLabels[i-3] = src + } + + if !model.LabelName(dst).IsValid() { + panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) + } + + outSet := make(map[uint64]struct{}, len(vector)) + srcVals := make([]string, len(srcLabels)) + for _, el := range vector { + h := el.Metric.Hash() + var outMetric labels.Labels + if l, ok := enh.dmn[h]; ok { + outMetric = l + } else { + + for i, src := range srcLabels { + srcVals[i] = el.Metric.Get(src) + } + + lb := labels.NewBuilder(el.Metric) + + strval := strings.Join(srcVals, sep) + if strval == "" { + lb.Del(dst) + } else { + lb.Set(dst, strval) + } + + outMetric = lb.Labels() + enh.dmn[h] = outMetric + } + outHash := outMetric.Hash() + + if _, exists := outSet[outHash]; exists { + panic(fmt.Errorf("duplicated label set in output of label_join(): %s", el.Metric)) + } else { + enh.out = append(enh.out, Sample{ + Metric: outMetric, + Point: Point{V: el.Point.V}, + }) + outSet[outHash] = struct{}{} + } + } + return enh.out +} + +// Common code for date related functions. 
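+// If no argument is given, f is evaluated at the current evaluation
+// timestamp; otherwise each sample value is interpreted as a Unix timestamp
+// in seconds. For example (editor's note, not upstream documentation),
+// day_of_week() uses enh.ts, while day_of_week(vector(0)) evaluates f at the
+// Unix epoch, a Thursday, and so yields 4.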
+func dateWrapper(vals []Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { + if len(vals) == 0 { + return append(enh.out, + Sample{ + Metric: labels.Labels{}, + Point: Point{V: f(time.Unix(enh.ts/1000, 0).UTC())}, + }) + } + + for _, el := range vals[0].(Vector) { + t := time.Unix(int64(el.V), 0).UTC() + enh.out = append(enh.out, Sample{ + Metric: enh.dropMetricName(el.Metric), + Point: Point{V: f(t)}, + }) + } + return enh.out +} + +// === days_in_month(v Vector) Scalar === +func funcDaysInMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(32 - time.Date(t.Year(), t.Month(), 32, 0, 0, 0, 0, time.UTC).Day()) + }) +} + +// === day_of_month(v Vector) Scalar === +func funcDayOfMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Day()) + }) +} + +// === day_of_week(v Vector) Scalar === +func funcDayOfWeek(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Weekday()) + }) +} + +// === hour(v Vector) Scalar === +func funcHour(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Hour()) + }) +} + +// === minute(v Vector) Scalar === +func funcMinute(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Minute()) + }) +} + +// === month(v Vector) Scalar === +func funcMonth(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Month()) + }) +} + +// === year(v Vector) Scalar === +func funcYear(vals []Value, args Expressions, enh *EvalNodeHelper) Vector { + return dateWrapper(vals, enh, func(t time.Time) float64 { + return float64(t.Year()) + }) +} + +var functions = map[string]*Function{ + "abs": { + Name: "abs", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcAbs, + }, + "absent": { + Name: "absent", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcAbsent, + }, + "avg_over_time": { + Name: "avg_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcAvgOverTime, + }, + "ceil": { + Name: "ceil", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcCeil, + }, + "changes": { + Name: "changes", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcChanges, + }, + "clamp_max": { + Name: "clamp_max", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Call: funcClampMax, + }, + "clamp_min": { + Name: "clamp_min", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Call: funcClampMin, + }, + "count_over_time": { + Name: "count_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcCountOverTime, + }, + "days_in_month": { + Name: "days_in_month", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcDaysInMonth, + }, + "day_of_month": { + Name: "day_of_month", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcDayOfMonth, + }, + "day_of_week": { + Name: "day_of_week", + 
ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcDayOfWeek, + }, + "delta": { + Name: "delta", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcDelta, + }, + "deriv": { + Name: "deriv", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcDeriv, + }, + "exp": { + Name: "exp", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcExp, + }, + "floor": { + Name: "floor", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcFloor, + }, + "histogram_quantile": { + Name: "histogram_quantile", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcHistogramQuantile, + }, + "holt_winters": { + Name: "holt_winters", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Call: funcHoltWinters, + }, + "hour": { + Name: "hour", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcHour, + }, + "idelta": { + Name: "idelta", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcIdelta, + }, + "increase": { + Name: "increase", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcIncrease, + }, + "irate": { + Name: "irate", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcIrate, + }, + "label_replace": { + Name: "label_replace", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString, ValueTypeString}, + ReturnType: ValueTypeVector, + Call: funcLabelReplace, + }, + "label_join": { + Name: "label_join", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString}, + Variadic: -1, + ReturnType: ValueTypeVector, + Call: funcLabelJoin, + }, + "ln": { + Name: "ln", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcLn, + }, + "log10": { + Name: "log10", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcLog10, + }, + "log2": { + Name: "log2", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcLog2, + }, + "max_over_time": { + Name: "max_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcMaxOverTime, + }, + "min_over_time": { + Name: "min_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcMinOverTime, + }, + "minute": { + Name: "minute", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcMinute, + }, + "month": { + Name: "month", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcMonth, + }, + "predict_linear": { + Name: "predict_linear", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Call: funcPredictLinear, + }, + "quantile_over_time": { + Name: "quantile_over_time", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcQuantileOverTime, + }, + "rate": { + Name: "rate", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcRate, + }, + "resets": { + Name: "resets", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcResets, + }, + "round": { + Name: "round", + ArgTypes: 
[]ValueType{ValueTypeVector, ValueTypeScalar}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcRound, + }, + "scalar": { + Name: "scalar", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeScalar, + Call: funcScalar, + }, + "sort": { + Name: "sort", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcSort, + }, + "sort_desc": { + Name: "sort_desc", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcSortDesc, + }, + "sqrt": { + Name: "sqrt", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcSqrt, + }, + "stddev_over_time": { + Name: "stddev_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcStddevOverTime, + }, + "stdvar_over_time": { + Name: "stdvar_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcStdvarOverTime, + }, + "sum_over_time": { + Name: "sum_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + Call: funcSumOverTime, + }, + "time": { + Name: "time", + ArgTypes: []ValueType{}, + ReturnType: ValueTypeScalar, + Call: funcTime, + }, + "timestamp": { + Name: "timestamp", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + Call: funcTimestamp, + }, + "vector": { + Name: "vector", + ArgTypes: []ValueType{ValueTypeScalar}, + ReturnType: ValueTypeVector, + Call: funcVector, + }, + "year": { + Name: "year", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + Call: funcYear, + }, +} + +// getFunction returns a predefined Function object for the given name. +func getFunction(name string) (*Function, bool) { + function, ok := functions[name] + return function, ok +} + +type vectorByValueHeap Vector + +func (s vectorByValueHeap) Len() int { + return len(s) +} + +func (s vectorByValueHeap) Less(i, j int) bool { + if math.IsNaN(s[i].V) { + return true + } + return s[i].V < s[j].V +} + +func (s vectorByValueHeap) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s *vectorByValueHeap) Push(x interface{}) { + *s = append(*s, *(x.(*Sample))) +} + +func (s *vectorByValueHeap) Pop() interface{} { + old := *s + n := len(old) + el := old[n-1] + *s = old[0 : n-1] + return el +} + +type vectorByReverseValueHeap Vector + +func (s vectorByReverseValueHeap) Len() int { + return len(s) +} + +func (s vectorByReverseValueHeap) Less(i, j int) bool { + if math.IsNaN(s[i].V) { + return true + } + return s[i].V > s[j].V +} + +func (s vectorByReverseValueHeap) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s *vectorByReverseValueHeap) Push(x interface{}) { + *s = append(*s, *(x.(*Sample))) +} + +func (s *vectorByReverseValueHeap) Pop() interface{} { + old := *s + n := len(old) + el := old[n-1] + *s = old[0 : n-1] + return el +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go new file mode 100644 index 000000000000..40ab846ddd72 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/fuzz.go @@ -0,0 +1,92 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Only build when go-fuzz is in use
+// +build gofuzz
+
+package promql
+
+import "github.com/prometheus/prometheus/pkg/textparse"
+
+// PromQL parser fuzzing instrumentation for use with
+// https://github.com/dvyukov/go-fuzz.
+//
+// Fuzz each parser by building an appropriately instrumented parser, e.g.
+// FuzzParseMetric, and executing it with go-fuzz:
+//
+// go-fuzz-build -func FuzzParseMetric -o FuzzParseMetric.zip github.com/prometheus/prometheus/promql
+//
+// And then run the tests with the appropriate inputs:
+//
+// go-fuzz -bin FuzzParseMetric.zip -workdir fuzz-data/ParseMetric
+//
+// Further input samples should go in the folders fuzz-data/ParseMetric/corpus.
+//
+// Repeat for ParseMetricSelector, ParseExpr and ParseStmts.
+
+// Tuning which value is returned from Fuzz*-functions has a strong influence
+// on how quickly the fuzzer converges on "interesting" cases. At least try
+// switching between fuzzMeh (= included in corpus, but not a priority) and
+// fuzzDiscard (= don't use this input for re-building later inputs) when
+// experimenting.
+const (
+	fuzzInteresting = 1
+	fuzzMeh         = 0
+	fuzzDiscard     = -1
+)
+
+// Fuzz the metric parser.
+//
+// Note that this is not the parser for the text-based exposition-format; that
+// lives in github.com/prometheus/client_golang/text.
+func FuzzParseMetric(in []byte) int {
+	p := textparse.New(in)
+	for p.Next() {
+	}
+
+	if p.Err() == nil {
+		return fuzzInteresting
+	}
+
+	return fuzzMeh
+}
+
+// Fuzz the metric selector parser.
+func FuzzParseMetricSelector(in []byte) int {
+	_, err := ParseMetricSelector(string(in))
+	if err == nil {
+		return fuzzInteresting
+	}
+
+	return fuzzMeh
+}
+
+// Fuzz the expression parser.
+func FuzzParseExpr(in []byte) int {
+	_, err := ParseExpr(string(in))
+	if err == nil {
+		return fuzzInteresting
+	}
+
+	return fuzzMeh
+}
+
+// Fuzz the parser.
+func FuzzParseStmts(in []byte) int {
+	_, err := ParseStmts(string(in))
+	if err == nil {
+		return fuzzInteresting
+	}
+
+	return fuzzMeh
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go
new file mode 100644
index 000000000000..bb2476cf69e4
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/lex.go
@@ -0,0 +1,906 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
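+// For example (editor's illustration), lexing the input foo{bar="baz"}
+// produces items such as (itemIdentifier, "foo"), (itemLeftBrace, "{"),
+// (itemIdentifier, "bar"), (itemEQL, "="), (itemString, "\"baz\"") and
+// (itemRightBrace, "}").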
+type item struct {
+	typ ItemType // The type of this item.
+	pos Pos      // The starting position, in bytes, of this item in the input string.
+	val string   // The value of this item.
+}
+
+// String returns a descriptive string for the item.
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	case i.typ == itemIdentifier || i.typ == itemMetricIdentifier:
+		return fmt.Sprintf("%q", i.val)
+	case i.typ.isKeyword():
+		return fmt.Sprintf("<%s>", i.val)
+	case i.typ.isOperator():
+		return fmt.Sprintf("<op:%s>", i.val)
+	case i.typ.isAggregator():
+		return fmt.Sprintf("<aggr:%s>", i.val)
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+// isOperator returns true if the item corresponds to an arithmetic or set operator.
+// Returns false otherwise.
+func (i ItemType) isOperator() bool { return i > operatorsStart && i < operatorsEnd }
+
+// isAggregator returns true if the item belongs to the aggregator functions.
+// Returns false otherwise.
+func (i ItemType) isAggregator() bool { return i > aggregatorsStart && i < aggregatorsEnd }
+
+// isAggregatorWithParam returns true if the item is an aggregator that takes a parameter.
+// Returns false otherwise.
+func (i ItemType) isAggregatorWithParam() bool {
+	return i == itemTopK || i == itemBottomK || i == itemCountValues || i == itemQuantile
+}
+
+// isKeyword returns true if the item corresponds to a keyword.
+// Returns false otherwise.
+func (i ItemType) isKeyword() bool { return i > keywordsStart && i < keywordsEnd }
+
+// isComparisonOperator returns true if the item corresponds to a comparison operator.
+// Returns false otherwise.
+func (i ItemType) isComparisonOperator() bool {
+	switch i {
+	case itemEQL, itemNEQ, itemLTE, itemLSS, itemGTE, itemGTR:
+		return true
+	default:
+		return false
+	}
+}
+
+// isSetOperator returns whether the item corresponds to a set operator.
+func (i ItemType) isSetOperator() bool {
+	switch i {
+	case itemLAND, itemLOR, itemLUnless:
+		return true
+	}
+	return false
+}
+
+// LowestPrec is a constant for operator precedence in expressions.
+const LowestPrec = 0 // Non-operators.
+
+// precedence returns the operator precedence of the binary
+// operator op. If op is not a binary operator, the result
+// is LowestPrec.
+func (i ItemType) precedence() int {
+	switch i {
+	case itemLOR:
+		return 1
+	case itemLAND, itemLUnless:
+		return 2
+	case itemEQL, itemNEQ, itemLTE, itemLSS, itemGTE, itemGTR:
+		return 3
+	case itemADD, itemSUB:
+		return 4
+	case itemMUL, itemDIV, itemMOD:
+		return 5
+	case itemPOW:
+		return 6
+	default:
+		return LowestPrec
+	}
+}
+
+func (i ItemType) isRightAssociative() bool {
+	switch i {
+	case itemPOW:
+		return true
+	default:
+		return false
+	}
+}
+
+type ItemType int
+
+const (
+	itemError ItemType = iota // Error occurred, value is error message
+	itemEOF
+	itemComment
+	itemIdentifier
+	itemMetricIdentifier
+	itemLeftParen
+	itemRightParen
+	itemLeftBrace
+	itemRightBrace
+	itemLeftBracket
+	itemRightBracket
+	itemComma
+	itemAssign
+	itemSemicolon
+	itemString
+	itemNumber
+	itemDuration
+	itemBlank
+	itemTimes
+
+	operatorsStart
+	// Operators.
+	itemSUB
+	itemADD
+	itemMUL
+	itemMOD
+	itemDIV
+	itemLAND
+	itemLOR
+	itemLUnless
+	itemEQL
+	itemNEQ
+	itemLTE
+	itemLSS
+	itemGTE
+	itemGTR
+	itemEQLRegex
+	itemNEQRegex
+	itemPOW
+	operatorsEnd
+
+	aggregatorsStart
+	// Aggregators.
+	itemAvg
+	itemCount
+	itemSum
+	itemMin
+	itemMax
+	itemStddev
+	itemStdvar
+	itemTopK
+	itemBottomK
+	itemCountValues
+	itemQuantile
+	aggregatorsEnd
+
+	keywordsStart
+	// Keywords.
+	itemAlert
+	itemIf
+	itemFor
+	itemLabels
+	itemAnnotations
+	itemOffset
+	itemBy
+	itemWithout
+	itemOn
+	itemIgnoring
+	itemGroupLeft
+	itemGroupRight
+	itemBool
+	keywordsEnd
+)
+
+var key = map[string]ItemType{
+	// Operators.
+	"and":    itemLAND,
+	"or":     itemLOR,
+	"unless": itemLUnless,
+
+	// Aggregators.
+	"sum":          itemSum,
+	"avg":          itemAvg,
+	"count":        itemCount,
+	"min":          itemMin,
+	"max":          itemMax,
+	"stddev":       itemStddev,
+	"stdvar":       itemStdvar,
+	"topk":         itemTopK,
+	"bottomk":      itemBottomK,
+	"count_values": itemCountValues,
+	"quantile":     itemQuantile,
+
+	// Keywords.
+	"alert":       itemAlert,
+	"if":          itemIf,
+	"for":         itemFor,
+	"labels":      itemLabels,
+	"annotations": itemAnnotations,
+	"offset":      itemOffset,
+	"by":          itemBy,
+	"without":     itemWithout,
+	"on":          itemOn,
+	"ignoring":    itemIgnoring,
+	"group_left":  itemGroupLeft,
+	"group_right": itemGroupRight,
+	"bool":        itemBool,
+}
+
+// These are the default string representations for common items. It does not
+// imply that those are the only character sequences that can be lexed to such an item.
+var itemTypeStr = map[ItemType]string{
+	itemLeftParen:    "(",
+	itemRightParen:   ")",
+	itemLeftBrace:    "{",
+	itemRightBrace:   "}",
+	itemLeftBracket:  "[",
+	itemRightBracket: "]",
+	itemComma:        ",",
+	itemAssign:       "=",
+	itemSemicolon:    ";",
+	itemBlank:        "_",
+	itemTimes:        "x",
+
+	itemSUB:      "-",
+	itemADD:      "+",
+	itemMUL:      "*",
+	itemMOD:      "%",
+	itemDIV:      "/",
+	itemEQL:      "==",
+	itemNEQ:      "!=",
+	itemLTE:      "<=",
+	itemLSS:      "<",
+	itemGTE:      ">=",
+	itemGTR:      ">",
+	itemEQLRegex: "=~",
+	itemNEQRegex: "!~",
+	itemPOW:      "^",
+}
+
+func init() {
+	// Add keywords to item type strings.
+	for s, ty := range key {
+		itemTypeStr[ty] = s
+	}
+	// Special numbers.
+	key["inf"] = itemNumber
+	key["nan"] = itemNumber
+}
+
+func (i ItemType) String() string {
+	if s, ok := itemTypeStr[i]; ok {
+		return s
+	}
+	return fmt.Sprintf("<item %d>", i)
+}
+
+func (i item) desc() string {
+	if _, ok := itemTypeStr[i.typ]; ok {
+		return i.String()
+	}
+	if i.typ == itemEOF {
+		return i.typ.desc()
+	}
+	return fmt.Sprintf("%s %s", i.typ.desc(), i)
+}
+
+func (i ItemType) desc() string {
+	switch i {
+	case itemError:
+		return "error"
+	case itemEOF:
+		return "end of input"
+	case itemComment:
+		return "comment"
+	case itemIdentifier:
+		return "identifier"
+	case itemMetricIdentifier:
+		return "metric identifier"
+	case itemString:
+		return "string"
+	case itemNumber:
+		return "number"
+	case itemDuration:
+		return "duration"
+	}
+	return fmt.Sprintf("%q", i)
+}
+
+const eof = -1
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// Pos is the position in a string.
+type Pos int
+
+// lexer holds the state of the scanner.
+type lexer struct {
+	input   string    // The string being scanned.
+	state   stateFn   // The next lexing function to enter.
+	pos     Pos       // Current position in the input.
+	start   Pos       // Start position of this item.
+	width   Pos       // Width of last rune read from input.
+	lastPos Pos       // Position of most recent item returned by nextItem.
+	items   chan item // Channel of scanned items.
+
+	parenDepth  int  // Nesting depth of ( ) exprs.
+	braceOpen   bool // Whether a { is opened.
+	bracketOpen bool // Whether a [ is opened.
+	stringOpen  rune // Quote rune of the string currently being read.
+ + // seriesDesc is set when a series description for the testing + // language is lexed. + seriesDesc bool +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if int(l.pos) >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = Pos(w) + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t ItemType) { + l.items <- item{t, l.start, l.input[l.start:l.pos]} + l.start = l.pos +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + // consume + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// linePosition reports at which character in the current line +// we are on. +func (l *lexer) linePosition() int { + lb := strings.LastIndex(l.input[:l.lastPos], "\n") + if lb == -1 { + return 1 + int(l.lastPos) + } + return 1 + int(l.lastPos) - lb +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + item := <-l.items + l.lastPos = item.pos + return item +} + +// lex creates a new scanner for the input string. +func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexStatements; l.state != nil; { + l.state = l.state(l) + } + close(l.items) +} + +// lineComment is the character that starts a line comment. +const lineComment = "#" + +// lexStatements is the top-level state for lexing. 
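+// The lexer is driven by run() above: each stateFn consumes input, emits
+// items on l.items, and returns the next stateFn. A minimal consumer
+// (editor's sketch, not part of the vendored code) drives it via nextItem:
+//
+//	l := lex(`up == 1`)
+//	for it := l.nextItem(); it.typ != itemEOF && it.typ != itemError; it = l.nextItem() {
+//		fmt.Println(it.typ, it.val)
+//	}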
+func lexStatements(l *lexer) stateFn { + if l.braceOpen { + return lexInsideBraces + } + if strings.HasPrefix(l.input[l.pos:], lineComment) { + return lexLineComment + } + + switch r := l.next(); { + case r == eof: + if l.parenDepth != 0 { + return l.errorf("unclosed left parenthesis") + } else if l.bracketOpen { + return l.errorf("unclosed left bracket") + } + l.emit(itemEOF) + return nil + case r == ',': + l.emit(itemComma) + case isSpace(r): + return lexSpace + case r == '*': + l.emit(itemMUL) + case r == '/': + l.emit(itemDIV) + case r == '%': + l.emit(itemMOD) + case r == '+': + l.emit(itemADD) + case r == '-': + l.emit(itemSUB) + case r == '^': + l.emit(itemPOW) + case r == '=': + if t := l.peek(); t == '=' { + l.next() + l.emit(itemEQL) + } else if t == '~' { + return l.errorf("unexpected character after '=': %q", t) + } else { + l.emit(itemAssign) + } + case r == '!': + if t := l.next(); t == '=' { + l.emit(itemNEQ) + } else { + return l.errorf("unexpected character after '!': %q", t) + } + case r == '<': + if t := l.peek(); t == '=' { + l.next() + l.emit(itemLTE) + } else { + l.emit(itemLSS) + } + case r == '>': + if t := l.peek(); t == '=' { + l.next() + l.emit(itemGTE) + } else { + l.emit(itemGTR) + } + case isDigit(r) || (r == '.' && isDigit(l.peek())): + l.backup() + return lexNumberOrDuration + case r == '"' || r == '\'': + l.stringOpen = r + return lexString + case r == '`': + l.stringOpen = r + return lexRawString + case isAlpha(r) || r == ':': + l.backup() + return lexKeywordOrIdentifier + case r == '(': + l.emit(itemLeftParen) + l.parenDepth++ + return lexStatements + case r == ')': + l.emit(itemRightParen) + l.parenDepth-- + if l.parenDepth < 0 { + return l.errorf("unexpected right parenthesis %q", r) + } + return lexStatements + case r == '{': + l.emit(itemLeftBrace) + l.braceOpen = true + return lexInsideBraces(l) + case r == '[': + if l.bracketOpen { + return l.errorf("unexpected left bracket %q", r) + } + l.emit(itemLeftBracket) + l.bracketOpen = true + return lexDuration + case r == ']': + if !l.bracketOpen { + return l.errorf("unexpected right bracket %q", r) + } + l.emit(itemRightBracket) + l.bracketOpen = false + + default: + return l.errorf("unexpected character: %q", r) + } + return lexStatements +} + +// lexInsideBraces scans the inside of a vector selector. Keywords are ignored and +// scanned as identifiers. +func lexInsideBraces(l *lexer) stateFn { + if strings.HasPrefix(l.input[l.pos:], lineComment) { + return lexLineComment + } + + switch r := l.next(); { + case r == eof: + return l.errorf("unexpected end of input inside braces") + case isSpace(r): + return lexSpace + case isAlpha(r): + l.backup() + return lexIdentifier + case r == ',': + l.emit(itemComma) + case r == '"' || r == '\'': + l.stringOpen = r + return lexString + case r == '`': + l.stringOpen = r + return lexRawString + case r == '=': + if l.next() == '~' { + l.emit(itemEQLRegex) + break + } + l.backup() + l.emit(itemEQL) + case r == '!': + switch nr := l.next(); { + case nr == '~': + l.emit(itemNEQRegex) + case nr == '=': + l.emit(itemNEQ) + default: + return l.errorf("unexpected character after '!' 
inside braces: %q", nr) + } + case r == '{': + return l.errorf("unexpected left brace %q", r) + case r == '}': + l.emit(itemRightBrace) + l.braceOpen = false + + if l.seriesDesc { + return lexValueSequence + } + return lexStatements + default: + return l.errorf("unexpected character inside braces: %q", r) + } + return lexInsideBraces +} + +// lexValueSequence scans a value sequence of a series description. +func lexValueSequence(l *lexer) stateFn { + switch r := l.next(); { + case r == eof: + return lexStatements + case isSpace(r): + lexSpace(l) + case r == '+': + l.emit(itemADD) + case r == '-': + l.emit(itemSUB) + case r == 'x': + l.emit(itemTimes) + case r == '_': + l.emit(itemBlank) + case isDigit(r) || (r == '.' && isDigit(l.peek())): + l.backup() + lexNumber(l) + case isAlpha(r): + l.backup() + // We might lex invalid items here but this will be caught by the parser. + return lexKeywordOrIdentifier + default: + return l.errorf("unexpected character in series sequence: %q", r) + } + return lexValueSequence +} + +// lexEscape scans a string escape sequence. The initial escaping character (\) +// has already been seen. +// +// NOTE: This function as well as the helper function digitVal() and associated +// tests have been adapted from the corresponding functions in the "go/scanner" +// package of the Go standard library to work for Prometheus-style strings. +// None of the actual escaping/quoting logic was changed in this function - it +// was only modified to integrate with our lexer. +func lexEscape(l *lexer) { + var n int + var base, max uint32 + + ch := l.next() + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen: + return + case '0', '1', '2', '3', '4', '5', '6', '7': + n, base, max = 3, 8, 255 + case 'x': + ch = l.next() + n, base, max = 2, 16, 255 + case 'u': + ch = l.next() + n, base, max = 4, 16, unicode.MaxRune + case 'U': + ch = l.next() + n, base, max = 8, 16, unicode.MaxRune + case eof: + l.errorf("escape sequence not terminated") + default: + l.errorf("unknown escape sequence %#U", ch) + } + + var x uint32 + for n > 0 { + d := uint32(digitVal(ch)) + if d >= base { + if ch == eof { + l.errorf("escape sequence not terminated") + } + l.errorf("illegal character %#U in escape sequence", ch) + } + x = x*base + d + ch = l.next() + n-- + } + + if x > max || 0xD800 <= x && x < 0xE000 { + l.errorf("escape sequence is an invalid Unicode code point") + } +} + +// digitVal returns the digit value of a rune or 16 in case the rune does not +// represent a valid digit. +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // Larger than any legal digit val. +} + +// lexString scans a quoted string. The initial quote has already been seen. +func lexString(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case '\\': + lexEscape(l) + case utf8.RuneError: + return l.errorf("invalid UTF-8 rune") + case eof, '\n': + return l.errorf("unterminated quoted string") + case l.stringOpen: + break Loop + } + } + l.emit(itemString) + return lexStatements +} + +// lexRawString scans a raw quoted string. The initial quote has already been seen. 
+func lexRawString(l *lexer) stateFn { +Loop: + for { + switch l.next() { + case utf8.RuneError: + return l.errorf("invalid UTF-8 rune") + case eof: + return l.errorf("unterminated raw string") + case l.stringOpen: + break Loop + } + } + l.emit(itemString) + return lexStatements +} + +// lexSpace scans a run of space characters. One space has already been seen. +func lexSpace(l *lexer) stateFn { + for isSpace(l.peek()) { + l.next() + } + l.ignore() + return lexStatements +} + +// lexLineComment scans a line comment. Left comment marker is known to be present. +func lexLineComment(l *lexer) stateFn { + l.pos += Pos(len(lineComment)) + for r := l.next(); !isEndOfLine(r) && r != eof; { + r = l.next() + } + l.backup() + l.emit(itemComment) + return lexStatements +} + +func lexDuration(l *lexer) stateFn { + if l.scanNumber() { + return l.errorf("missing unit character in duration") + } + // Next two chars must be a valid unit and a non-alphanumeric. + if l.accept("smhdwy") { + if isAlphaNumeric(l.next()) { + return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos]) + } + l.backup() + l.emit(itemDuration) + return lexStatements + } + return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos]) +} + +// lexNumber scans a number: decimal, hex, oct or float. +func lexNumber(l *lexer) stateFn { + if !l.scanNumber() { + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(itemNumber) + return lexStatements +} + +// lexNumberOrDuration scans a number or a duration item. +func lexNumberOrDuration(l *lexer) stateFn { + if l.scanNumber() { + l.emit(itemNumber) + return lexStatements + } + // Next two chars must be a valid unit and a non-alphanumeric. + if l.accept("smhdwy") { + if isAlphaNumeric(l.next()) { + return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos]) + } + l.backup() + l.emit(itemDuration) + return lexStatements + } + return l.errorf("bad number or duration syntax: %q", l.input[l.start:l.pos]) +} + +// scanNumber scans numbers of different formats. The scanned item is +// not necessarily a valid number. This case is caught by the parser. +func (l *lexer) scanNumber() bool { + digits := "0123456789" + // Disallow hexadecimal in series descriptions as the syntax is ambiguous. + if !l.seriesDesc && l.accept("0") && l.accept("xX") { + digits = "0123456789abcdefABCDEF" + } + l.acceptRun(digits) + if l.accept(".") { + l.acceptRun(digits) + } + if l.accept("eE") { + l.accept("+-") + l.acceptRun("0123456789") + } + // Next thing must not be alphanumeric unless it's the times token + // for series repetitions. + if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) { + return true + } + return false +} + +// lexIdentifier scans an alphanumeric identifier. The next character +// is known to be a letter. +func lexIdentifier(l *lexer) stateFn { + for isAlphaNumeric(l.next()) { + // absorb + } + l.backup() + l.emit(itemIdentifier) + return lexStatements +} + +// lexKeywordOrIdentifier scans an alphanumeric identifier which may contain +// a colon rune. If the identifier is a keyword the respective keyword item +// is scanned. +func lexKeywordOrIdentifier(l *lexer) stateFn { +Loop: + for { + switch r := l.next(); { + case isAlphaNumeric(r) || r == ':': + // absorb. 
+ default: + l.backup() + word := l.input[l.start:l.pos] + if kw, ok := key[strings.ToLower(word)]; ok { + l.emit(kw) + } else if !strings.Contains(word, ":") { + l.emit(itemIdentifier) + } else { + l.emit(itemMetricIdentifier) + } + break Loop + } + } + if l.seriesDesc && l.peek() != '{' { + return lexValueSequence + } + return lexStatements +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' || r == '\n' || r == '\r' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return isAlpha(r) || isDigit(r) +} + +// isDigit reports whether r is a digit. Note: we cannot use unicode.IsDigit() +// instead because that also classifies non-Latin digits as digits. See +// https://github.com/prometheus/prometheus/issues/939. +func isDigit(r rune) bool { + return '0' <= r && r <= '9' +} + +// isAlpha reports whether r is an alphabetic or underscore. +func isAlpha(r rune) bool { + return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') +} + +// isLabel reports whether the string can be used as label. +func isLabel(s string) bool { + if len(s) == 0 || !isAlpha(rune(s[0])) { + return false + } + for _, c := range s[1:] { + if !isAlphaNumeric(c) { + return false + } + } + return true +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go new file mode 100644 index 000000000000..416af3c23ba7 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/parse.go @@ -0,0 +1,1139 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//nolint //Since this was copied from Prometheus leave it as is +package promql + +import ( + "fmt" + "math" + "os" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" + + "github.com/prometheus/prometheus/util/strutil" +) + +type parser struct { + lex *lexer + token [3]item + peekCount int +} + +// ParseErr wraps a parsing error with line and position context. +// If the parsing input was a single line, line will be 0 and omitted +// from the error string. +type ParseErr struct { + Line, Pos int + Err error +} + +func (e *ParseErr) Error() string { + if e.Line == 0 { + return fmt.Sprintf("parse error at char %d: %s", e.Pos, e.Err) + } + return fmt.Sprintf("parse error at line %d, char %d: %s", e.Line, e.Pos, e.Err) +} + +// ParseStmts parses the input and returns the resulting statements or any occurring error. 
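+// For example (editor's sketch, not part of the vendored code), a recording
+// rule in the legacy syntax parses as:
+//
+//	stmts, err := ParseStmts(`job:http_errors:rate5m = rate(http_errors_total[5m])`)
+//	if err != nil {
+//		// On a parse failure, err is typically a *ParseErr carrying
+//		// line and position context.
+//	}
+//	_ = stmts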
+func ParseStmts(input string) (Statements, error) { + p := newParser(input) + + stmts, err := p.parseStmts() + if err != nil { + return nil, err + } + err = p.typecheck(stmts) + return stmts, err +} + +// ParseExpr returns the expression parsed from the input. +func ParseExpr(input string) (Expr, error) { + p := newParser(input) + + expr, err := p.parseExpr() + if err != nil { + return nil, err + } + err = p.typecheck(expr) + return expr, err +} + +// ParseMetric parses the input into a metric +func ParseMetric(input string) (m labels.Labels, err error) { + p := newParser(input) + defer p.recover(&err) + + m = p.metric() + if p.peek().typ != itemEOF { + p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:]) + } + return m, nil +} + +// ParseMetricSelector parses the provided textual metric selector into a list of +// label matchers. +func ParseMetricSelector(input string) (m []*labels.Matcher, err error) { + p := newParser(input) + defer p.recover(&err) + + name := "" + if t := p.peek().typ; t == itemMetricIdentifier || t == itemIdentifier { + name = p.next().val + } + vs := p.VectorSelector(name) + if p.peek().typ != itemEOF { + p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:]) + } + return vs.LabelMatchers, nil +} + +// newParser returns a new parser. +func newParser(input string) *parser { + p := &parser{ + lex: lex(input), + } + return p +} + +// parseStmts parses a sequence of statements from the input. +func (p *parser) parseStmts() (stmts Statements, err error) { + defer p.recover(&err) + stmts = Statements{} + + for p.peek().typ != itemEOF { + if p.peek().typ == itemComment { + continue + } + stmts = append(stmts, p.stmt()) + } + return +} + +// parseExpr parses a single expression from the input. +func (p *parser) parseExpr() (expr Expr, err error) { + defer p.recover(&err) + + for p.peek().typ != itemEOF { + if p.peek().typ == itemComment { + continue + } + if expr != nil { + p.errorf("could not parse remaining input %.15q...", p.lex.input[p.lex.lastPos:]) + } + expr = p.expr() + } + + if expr == nil { + p.errorf("no expression found in input") + } + return +} + +// sequenceValue is an omittable value in a sequence of time series values. +type sequenceValue struct { + value float64 + omitted bool +} + +func (v sequenceValue) String() string { + if v.omitted { + return "_" + } + return fmt.Sprintf("%f", v.value) +} + +// parseSeriesDesc parses the description of a time series. +func parseSeriesDesc(input string) (labels.Labels, []sequenceValue, error) { + p := newParser(input) + p.lex.seriesDesc = true + + return p.parseSeriesDesc() +} + +// parseSeriesDesc parses a description of a time series into its metric and value sequence. +func (p *parser) parseSeriesDesc() (m labels.Labels, vals []sequenceValue, err error) { + defer p.recover(&err) + + m = p.metric() + + const ctx = "series values" + for { + if p.peek().typ == itemEOF { + break + } + + // Extract blanks. + if p.peek().typ == itemBlank { + p.next() + times := uint64(1) + if p.peek().typ == itemTimes { + p.next() + times, err = strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64) + if err != nil { + p.errorf("invalid repetition in %s: %s", ctx, err) + } + } + for i := uint64(0); i < times; i++ { + vals = append(vals, sequenceValue{omitted: true}) + } + continue + } + + // Extract values. 
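+		// For example (editor's note), the series description `0+10x3`
+		// expands to the values 0 10 20 30, and `_x2` expands to two
+		// omitted values via the blank handling above.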
+ sign := 1.0 + if t := p.peek().typ; t == itemSUB || t == itemADD { + if p.next().typ == itemSUB { + sign = -1 + } + } + var k float64 + if t := p.peek().typ; t == itemNumber { + k = sign * p.number(p.expect(itemNumber, ctx).val) + } else if t == itemIdentifier && p.peek().val == "stale" { + p.next() + k = math.Float64frombits(value.StaleNaN) + } else { + p.errorf("expected number or 'stale' in %s but got %s (value: %s)", ctx, t.desc(), p.peek()) + } + vals = append(vals, sequenceValue{ + value: k, + }) + + // If there are no offset repetitions specified, proceed with the next value. + if t := p.peek(); t.typ == itemNumber || t.typ == itemBlank || t.typ == itemIdentifier && t.val == "stale" { + continue + } else if t.typ == itemEOF { + break + } else if t.typ != itemADD && t.typ != itemSUB { + p.errorf("expected next value or relative expansion in %s but got %s (value: %s)", ctx, t.desc(), p.peek()) + } + + // Expand the repeated offsets into values. + sign = 1.0 + if p.next().typ == itemSUB { + sign = -1.0 + } + offset := sign * p.number(p.expect(itemNumber, ctx).val) + p.expect(itemTimes, ctx) + + times, err := strconv.ParseUint(p.expect(itemNumber, ctx).val, 10, 64) + if err != nil { + p.errorf("invalid repetition in %s: %s", ctx, err) + } + + for i := uint64(0); i < times; i++ { + k += offset + vals = append(vals, sequenceValue{ + value: k, + }) + } + } + return m, vals, nil +} + +// typecheck checks correct typing of the parsed statements or expression. +func (p *parser) typecheck(node Node) (err error) { + defer p.recover(&err) + + p.checkType(node) + return nil +} + +// next returns the next token. +func (p *parser) next() item { + if p.peekCount > 0 { + p.peekCount-- + } else { + t := p.lex.nextItem() + // Skip comments. + for t.typ == itemComment { + t = p.lex.nextItem() + } + p.token[0] = t + } + if p.token[p.peekCount].typ == itemError { + p.errorf("%s", p.token[p.peekCount].val) + } + return p.token[p.peekCount] +} + +// peek returns but does not consume the next token. +func (p *parser) peek() item { + if p.peekCount > 0 { + return p.token[p.peekCount-1] + } + p.peekCount = 1 + + t := p.lex.nextItem() + // Skip comments. + for t.typ == itemComment { + t = p.lex.nextItem() + } + p.token[0] = t + return p.token[0] +} + +// backup backs the input stream up one token. +func (p *parser) backup() { + p.peekCount++ +} + +// errorf formats the error and terminates processing. +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Errorf(format, args...)) +} + +// error terminates processing. +func (p *parser) error(err error) { + perr := &ParseErr{ + Line: p.lex.lineNumber(), + Pos: p.lex.linePosition(), + Err: err, + } + if strings.Count(strings.TrimSpace(p.lex.input), "\n") == 0 { + perr.Line = 0 + } + panic(perr) +} + +// expect consumes the next token and guarantees it has the required type. +func (p *parser) expect(exp ItemType, context string) item { + token := p.next() + if token.typ != exp { + p.errorf("unexpected %s in %s, expected %s", token.desc(), context, exp.desc()) + } + return token +} + +// expectOneOf consumes the next token and guarantees it has one of the required types. 
+func (p *parser) expectOneOf(exp1, exp2 ItemType, context string) item {
+	token := p.next()
+	if token.typ != exp1 && token.typ != exp2 {
+		p.errorf("unexpected %s in %s, expected %s or %s", token.desc(), context, exp1.desc(), exp2.desc())
+	}
+	return token
+}
+
+var errUnexpected = fmt.Errorf("unexpected error")
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (p *parser) recover(errp *error) {
+	e := recover()
+	if e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			// Print the stack trace but do not inhibit the running application.
+			buf := make([]byte, 64<<10)
+			buf = buf[:runtime.Stack(buf, false)]
+
+			fmt.Fprintf(os.Stderr, "parser panic: %v\n%s", e, buf)
+			*errp = errUnexpected
+		} else {
+			*errp = e.(error)
+		}
+	}
+}
+
+// stmt parses any statement.
+//
+//		alertStatement | recordStatement
+//
+func (p *parser) stmt() Statement {
+	switch tok := p.peek(); tok.typ {
+	case itemAlert:
+		return p.alertStmt()
+	case itemIdentifier, itemMetricIdentifier:
+		return p.recordStmt()
+	}
+	p.errorf("no valid statement detected")
+	return nil
+}
+
+// alertStmt parses an alert rule.
+//
+//		ALERT <name> IF <expr> [FOR <duration>]
+//			[LABELS <label_set>]
+//			[ANNOTATIONS <label_set>]
+//
+func (p *parser) alertStmt() *AlertStmt {
+	const ctx = "alert statement"
+
+	p.expect(itemAlert, ctx)
+	name := p.expect(itemIdentifier, ctx)
+	// Alerts require a Vector typed expression.
+	p.expect(itemIf, ctx)
+	expr := p.expr()
+
+	// Optional for clause.
+	var (
+		duration time.Duration
+		err      error
+	)
+	if p.peek().typ == itemFor {
+		p.next()
+		dur := p.expect(itemDuration, ctx)
+		duration, err = parseDuration(dur.val)
+		if err != nil {
+			p.error(err)
+		}
+	}
+
+	var (
+		lset        labels.Labels
+		annotations labels.Labels
+	)
+	if p.peek().typ == itemLabels {
+		p.expect(itemLabels, ctx)
+		lset = p.labelSet()
+	}
+	if p.peek().typ == itemAnnotations {
+		p.expect(itemAnnotations, ctx)
+		annotations = p.labelSet()
+	}
+
+	return &AlertStmt{
+		Name:        name.val,
+		Expr:        expr,
+		Duration:    duration,
+		Labels:      lset,
+		Annotations: annotations,
+	}
+}
+
+// recordStmt parses a recording rule.
+func (p *parser) recordStmt() *RecordStmt {
+	const ctx = "record statement"
+
+	name := p.expectOneOf(itemIdentifier, itemMetricIdentifier, ctx).val
+
+	var lset labels.Labels
+	if p.peek().typ == itemLeftBrace {
+		lset = p.labelSet()
+	}
+
+	p.expect(itemAssign, ctx)
+	expr := p.expr()
+
+	return &RecordStmt{
+		Name:   name,
+		Labels: lset,
+		Expr:   expr,
+	}
+}
+
+// expr parses any expression.
+func (p *parser) expr() Expr {
+	// Parse the starting expression.
+	expr := p.unaryExpr()
+
+	// Loop through the operations and construct a binary operation tree based
+	// on the operators' precedence.
+	for {
+		// If the next token is not an operator the expression is done.
+		op := p.peek().typ
+		if !op.isOperator() {
+			return expr
+		}
+		p.next() // Consume operator.
+
+		// Parse optional operator matching options. Its validity
+		// is checked in the type-checking stage.
+		vecMatching := &VectorMatching{
+			Card: CardOneToOne,
+		}
+		if op.isSetOperator() {
+			vecMatching.Card = CardManyToMany
+		}
+
+		returnBool := false
+		// Parse bool modifier.
+		if p.peek().typ == itemBool {
+			if !op.isComparisonOperator() {
+				p.errorf("bool modifier can only be used on comparison operators")
+			}
+			p.next()
+			returnBool = true
+		}
+
+		// Parse ON/IGNORING clause.
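+		// For example (editor's note), in
+		//
+		//	errors_total / ignoring(code) group_left requests_total
+		//
+		// the clause below records the ignored labels and sets
+		// many-to-one cardinality before the right-hand operand is
+		// parsed.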
+		if p.peek().typ == itemOn || p.peek().typ == itemIgnoring {
+			if p.peek().typ == itemOn {
+				vecMatching.On = true
+			}
+			p.next()
+			vecMatching.MatchingLabels = p.labels()
+
+			// Parse grouping.
+			if t := p.peek().typ; t == itemGroupLeft || t == itemGroupRight {
+				p.next()
+				if t == itemGroupLeft {
+					vecMatching.Card = CardManyToOne
+				} else {
+					vecMatching.Card = CardOneToMany
+				}
+				if p.peek().typ == itemLeftParen {
+					vecMatching.Include = p.labels()
+				}
+			}
+		}
+
+		for _, ln := range vecMatching.MatchingLabels {
+			for _, ln2 := range vecMatching.Include {
+				if ln == ln2 && vecMatching.On {
+					p.errorf("label %q must not occur in ON and GROUP clause at once", ln)
+				}
+			}
+		}
+
+		// Parse the next operand.
+		rhs := p.unaryExpr()
+
+		// Assign the new root based on the precedence of the LHS and RHS operators.
+		expr = p.balance(expr, op, rhs, vecMatching, returnBool)
+	}
+}
+
+func (p *parser) balance(lhs Expr, op ItemType, rhs Expr, vecMatching *VectorMatching, returnBool bool) *BinaryExpr {
+	if lhsBE, ok := lhs.(*BinaryExpr); ok {
+		precd := lhsBE.Op.precedence() - op.precedence()
+		if (precd < 0) || (precd == 0 && op.isRightAssociative()) {
+			balanced := p.balance(lhsBE.RHS, op, rhs, vecMatching, returnBool)
+			if lhsBE.Op.isComparisonOperator() && !lhsBE.ReturnBool && balanced.Type() == ValueTypeScalar && lhsBE.LHS.Type() == ValueTypeScalar {
+				p.errorf("comparisons between scalars must use BOOL modifier")
+			}
+			return &BinaryExpr{
+				Op:             lhsBE.Op,
+				LHS:            lhsBE.LHS,
+				RHS:            balanced,
+				VectorMatching: lhsBE.VectorMatching,
+				ReturnBool:     lhsBE.ReturnBool,
+			}
+		}
+	}
+	if op.isComparisonOperator() && !returnBool && rhs.Type() == ValueTypeScalar && lhs.Type() == ValueTypeScalar {
+		p.errorf("comparisons between scalars must use BOOL modifier")
+	}
+	return &BinaryExpr{
+		Op:             op,
+		LHS:            lhs,
+		RHS:            rhs,
+		VectorMatching: vecMatching,
+		ReturnBool:     returnBool,
+	}
+}
+
+// unaryExpr parses a unary expression.
+//
+//		<Vector_selector> | <Matrix_selector> | (+|-) <number_literal> | '(' <expr> ')'
+//
+func (p *parser) unaryExpr() Expr {
+	switch t := p.peek(); t.typ {
+	case itemADD, itemSUB:
+		p.next()
+		e := p.unaryExpr()
+
+		// Simplify unary expressions for number literals.
+		if nl, ok := e.(*NumberLiteral); ok {
+			if t.typ == itemSUB {
+				nl.Val *= -1
+			}
+			return nl
+		}
+		return &UnaryExpr{Op: t.typ, Expr: e}
+
+	case itemLeftParen:
+		p.next()
+		e := p.expr()
+		p.expect(itemRightParen, "paren expression")
+
+		return &ParenExpr{Expr: e}
+	}
+	e := p.primaryExpr()
+
+	// Expression might be followed by a range selector.
+	if p.peek().typ == itemLeftBracket {
+		vs, ok := e.(*VectorSelector)
+		if !ok {
+			p.errorf("range specification must be preceded by a metric selector, but follows a %T instead", e)
+		}
+		e = p.rangeSelector(vs)
+	}
+
+	// Parse optional offset.
+	if p.peek().typ == itemOffset {
+		offset := p.offset()
+
+		switch s := e.(type) {
+		case *VectorSelector:
+			s.Offset = offset
+		case *MatrixSelector:
+			s.Offset = offset
+		default:
+			p.errorf("offset modifier must be preceded by an instant or range selector, but follows a %T instead", e)
+		}
+	}
+
+	return e
+}
+
+// rangeSelector parses a Matrix (a.k.a. range) selector based on a given
+// Vector selector.
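+// For example, the hypothetical input `http_requests_total[5m]` produces a
+// MatrixSelector whose Range equals five minutes.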
+//
+//		<Vector_selector> '[' <duration> ']'
+//
+func (p *parser) rangeSelector(vs *VectorSelector) *MatrixSelector {
+	const ctx = "range selector"
+	p.next()
+
+	var erange time.Duration
+	var err error
+
+	erangeStr := p.expect(itemDuration, ctx).val
+	erange, err = parseDuration(erangeStr)
+	if err != nil {
+		p.error(err)
+	}
+
+	p.expect(itemRightBracket, ctx)
+
+	e := &MatrixSelector{
+		Name:          vs.Name,
+		LabelMatchers: vs.LabelMatchers,
+		Range:         erange,
+	}
+	return e
+}
+
+// number parses a number.
+func (p *parser) number(val string) float64 {
+	n, err := strconv.ParseInt(val, 0, 64)
+	f := float64(n)
+	if err != nil {
+		f, err = strconv.ParseFloat(val, 64)
+	}
+	if err != nil {
+		p.errorf("error parsing number: %s", err)
+	}
+	return f
+}
+
+// primaryExpr parses a primary expression.
+//
+//		<metric> | <function_call> | <Vector_aggregation> | <literal>
+//
+func (p *parser) primaryExpr() Expr {
+	switch t := p.next(); {
+	case t.typ == itemNumber:
+		f := p.number(t.val)
+		return &NumberLiteral{f}
+
+	case t.typ == itemString:
+		return &StringLiteral{p.unquoteString(t.val)}
+
+	case t.typ == itemLeftBrace:
+		// Metric selector without metric name.
+		p.backup()
+		return p.VectorSelector("")
+
+	case t.typ == itemIdentifier:
+		// Check for function call.
+		if p.peek().typ == itemLeftParen {
+			return p.call(t.val)
+		}
+		fallthrough // Else metric selector.
+
+	case t.typ == itemMetricIdentifier:
+		return p.VectorSelector(t.val)
+
+	case t.typ.isAggregator():
+		p.backup()
+		return p.aggrExpr()
+
+	default:
+		p.errorf("no valid expression found")
+	}
+	return nil
+}
+
+// labels parses a list of labelnames.
+//
+//		'(' <label_name>, ... ')'
+//
+func (p *parser) labels() []string {
+	const ctx = "grouping opts"
+
+	p.expect(itemLeftParen, ctx)
+
+	labels := []string{}
+	if p.peek().typ != itemRightParen {
+		for {
+			id := p.next()
+			if !isLabel(id.val) {
+				p.errorf("unexpected %s in %s, expected label", id.desc(), ctx)
+			}
+			labels = append(labels, id.val)
+
+			if p.peek().typ != itemComma {
+				break
+			}
+			p.next()
+		}
+	}
+	p.expect(itemRightParen, ctx)
+
+	return labels
+}
+
+// aggrExpr parses an aggregation expression.
+//
+//		<aggr_op> (<Vector_expr>) [by|without <labels>]
+//		<aggr_op> [by|without <labels>] (<Vector_expr>)
+//
+func (p *parser) aggrExpr() *AggregateExpr {
+	const ctx = "aggregation"
+
+	agop := p.next()
+	if !agop.typ.isAggregator() {
+		p.errorf("expected aggregation operator but got %s", agop)
+	}
+	var grouping []string
+	var without bool
+
+	modifiersFirst := false
+
+	if t := p.peek().typ; t == itemBy || t == itemWithout {
+		if t == itemWithout {
+			without = true
+		}
+		p.next()
+		grouping = p.labels()
+		modifiersFirst = true
+	}
+
+	p.expect(itemLeftParen, ctx)
+	var param Expr
+	if agop.typ.isAggregatorWithParam() {
+		param = p.expr()
+		p.expect(itemComma, ctx)
+	}
+	e := p.expr()
+	p.expect(itemRightParen, ctx)
+
+	if !modifiersFirst {
+		if t := p.peek().typ; t == itemBy || t == itemWithout {
+			if len(grouping) > 0 {
+				p.errorf("aggregation must only contain one grouping clause")
+			}
+			if t == itemWithout {
+				without = true
+			}
+			p.next()
+			grouping = p.labels()
+		}
+	}
+
+	return &AggregateExpr{
+		Op:       agop.typ,
+		Expr:     e,
+		Param:    param,
+		Grouping: grouping,
+		Without:  without,
+	}
+}
+
+// call parses a function call.
+//
+//		<func_name> '(' [ <arg_expr>, ...] ')'
+//
+func (p *parser) call(name string) *Call {
+	const ctx = "function call"
+
+	fn, exist := getFunction(name)
+	if !exist {
+		p.errorf("unknown function with name %q", name)
+	}
+
+	p.expect(itemLeftParen, ctx)
+	// Might be call without args.
+	if p.peek().typ == itemRightParen {
+		p.next() // Consume.
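+		// A zero-argument call such as `time()` (hypothetical input) takes
+		// this path.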
+		return &Call{fn, nil}
+	}
+
+	var args []Expr
+	for {
+		e := p.expr()
+		args = append(args, e)
+
+		// Terminate if no more arguments.
+		if p.peek().typ != itemComma {
+			break
+		}
+		p.next()
+	}
+
+	// Call must be closed.
+	p.expect(itemRightParen, ctx)
+
+	return &Call{Func: fn, Args: args}
+}
+
+// labelSet parses a set of label matchers
+//
+//		'{' [ <labelname> '=' <match_string>, ... ] '}'
+//
+func (p *parser) labelSet() labels.Labels {
+	set := []labels.Label{}
+	for _, lm := range p.labelMatchers(itemEQL) {
+		set = append(set, labels.Label{Name: lm.Name, Value: lm.Value})
+	}
+	return labels.New(set...)
+}
+
+// labelMatchers parses a set of label matchers.
+//
+//		'{' [ <labelname> <match_op> <match_string>, ... ] '}'
+//
+func (p *parser) labelMatchers(operators ...ItemType) []*labels.Matcher {
+	const ctx = "label matching"
+
+	matchers := []*labels.Matcher{}
+
+	p.expect(itemLeftBrace, ctx)
+
+	// Check if no matchers are provided.
+	if p.peek().typ == itemRightBrace {
+		p.next()
+		return matchers
+	}
+
+	for {
+		label := p.expect(itemIdentifier, ctx)
+
+		op := p.next().typ
+		if !op.isOperator() {
+			p.errorf("expected label matching operator but got %s", op)
+		}
+		var validOp = false
+		for _, allowedOp := range operators {
+			if op == allowedOp {
+				validOp = true
+			}
+		}
+		if !validOp {
+			p.errorf("operator must be one of %q, is %q", operators, op)
+		}
+
+		val := p.unquoteString(p.expect(itemString, ctx).val)
+
+		// Map the item to the respective match type.
+		var matchType labels.MatchType
+		switch op {
+		case itemEQL:
+			matchType = labels.MatchEqual
+		case itemNEQ:
+			matchType = labels.MatchNotEqual
+		case itemEQLRegex:
+			matchType = labels.MatchRegexp
+		case itemNEQRegex:
+			matchType = labels.MatchNotRegexp
+		default:
+			p.errorf("item %q is not a metric match type", op)
+		}
+
+		m, err := labels.NewMatcher(matchType, label.val, val)
+		if err != nil {
+			p.error(err)
+		}
+
+		matchers = append(matchers, m)
+
+		if p.peek().typ == itemIdentifier {
+			p.errorf("missing comma before next identifier %q", p.peek().val)
+		}
+
+		// Terminate list if last matcher.
+		if p.peek().typ != itemComma {
+			break
+		}
+		p.next()
+
+		// Allow comma after each item in a multi-line listing.
+		if p.peek().typ == itemRightBrace {
+			break
+		}
+	}
+
+	p.expect(itemRightBrace, ctx)
+
+	return matchers
+}
+
+// metric parses a metric.
+//
+//		<label_set>
+//		<metric_identifier> [<label_set>]
+//
+func (p *parser) metric() labels.Labels {
+	name := ""
+	var m labels.Labels
+
+	t := p.peek().typ
+	if t == itemIdentifier || t == itemMetricIdentifier {
+		name = p.next().val
+		t = p.peek().typ
+	}
+	if t != itemLeftBrace && name == "" {
+		p.errorf("missing metric name or metric selector")
+	}
+	if t == itemLeftBrace {
+		m = p.labelSet()
+	}
+	if name != "" {
+		m = append(m, labels.Label{Name: labels.MetricName, Value: name})
+		sort.Sort(m)
+	}
+	return m
+}
+
+// offset parses an offset modifier.
+//
+//		offset <duration>
+//
+func (p *parser) offset() time.Duration {
+	const ctx = "offset"
+
+	p.next()
+	offi := p.expect(itemDuration, ctx)
+
+	offset, err := parseDuration(offi.val)
+	if err != nil {
+		p.error(err)
+	}
+
+	return offset
+}
+
+// VectorSelector parses a new (instant) vector selector.
+//
+//		<metric_identifier> [<label_matchers>]
+//		[<metric_identifier>] <label_matchers>
+//
+func (p *parser) VectorSelector(name string) *VectorSelector {
+	var matchers []*labels.Matcher
+	// Parse label matching if any.
+	if t := p.peek(); t.typ == itemLeftBrace {
+		matchers = p.labelMatchers(itemEQL, itemNEQ, itemEQLRegex, itemNEQRegex)
+	}
+	// Metric name must not be set in the label matchers and before at the same time.
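+	// E.g. the hypothetical input `foo{__name__="bar"}` is rejected below,
+	// since the metric name is given twice.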
+	if name != "" {
+		for _, m := range matchers {
+			if m.Name == labels.MetricName {
+				p.errorf("metric name must not be set twice: %q or %q", name, m.Value)
+			}
+		}
+		// Set name label matching.
+		m, err := labels.NewMatcher(labels.MatchEqual, labels.MetricName, name)
+		if err != nil {
+			panic(err) // Must not happen with metric.Equal.
+		}
+		matchers = append(matchers, m)
+	}
+
+	if len(matchers) == 0 {
+		p.errorf("vector selector must contain label matchers or metric name")
+	}
+	// A Vector selector must contain at least one non-empty matcher to prevent
+	// implicit selection of all metrics (e.g. by a typo).
+	notEmpty := false
+	for _, lm := range matchers {
+		if !lm.Matches("") {
+			notEmpty = true
+			break
+		}
+	}
+	if !notEmpty {
+		p.errorf("vector selector must contain at least one non-empty matcher")
+	}
+
+	return &VectorSelector{
+		Name:          name,
+		LabelMatchers: matchers,
+	}
+}
+
+// expectType checks the type of the node and raises an error if it
+// is not of the expected type.
+func (p *parser) expectType(node Node, want ValueType, context string) {
+	t := p.checkType(node)
+	if t != want {
+		p.errorf("expected type %s in %s, got %s", documentedType(want), context, documentedType(t))
+	}
+}
+
+// checkType checks the types of the children of each node and raises an
+// error if they do not form a valid node.
+//
+// Some of these checks are redundant as the parsing stage does not allow
+// them, but the costs are small and might reveal errors when making changes.
+func (p *parser) checkType(node Node) (typ ValueType) {
+	// For expressions the type is determined by their Type function.
+	// Statements and lists do not have a type but are not invalid either.
+	switch n := node.(type) {
+	case Statements, Expressions, Statement:
+		typ = ValueTypeNone
+	case Expr:
+		typ = n.Type()
+	default:
+		p.errorf("unknown node type: %T", node)
+	}
+
+	// Recursively check correct typing for child nodes and raise
+	// errors in case of bad typing.
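+	// For example, the hypothetical expression `sum(1)` fails the
+	// *AggregateExpr case below: the operand is a scalar, not an instant
+	// vector.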
+ switch n := node.(type) { + case Statements: + for _, s := range n { + p.expectType(s, ValueTypeNone, "statement list") + } + case *AlertStmt: + p.expectType(n.Expr, ValueTypeVector, "alert statement") + + case *EvalStmt: + ty := p.checkType(n.Expr) + if ty == ValueTypeNone { + p.errorf("evaluation statement must have a valid expression type but got %s", documentedType(ty)) + } + + case *RecordStmt: + ty := p.checkType(n.Expr) + if ty != ValueTypeVector && ty != ValueTypeScalar { + p.errorf("record statement must have a valid expression of type instant vector or scalar but got %s", documentedType(ty)) + } + + case Expressions: + for _, e := range n { + ty := p.checkType(e) + if ty == ValueTypeNone { + p.errorf("expression must have a valid expression type but got %s", documentedType(ty)) + } + } + case *AggregateExpr: + if !n.Op.isAggregator() { + p.errorf("aggregation operator expected in aggregation expression but got %q", n.Op) + } + p.expectType(n.Expr, ValueTypeVector, "aggregation expression") + if n.Op == itemTopK || n.Op == itemBottomK || n.Op == itemQuantile { + p.expectType(n.Param, ValueTypeScalar, "aggregation parameter") + } + if n.Op == itemCountValues { + p.expectType(n.Param, ValueTypeString, "aggregation parameter") + } + + case *BinaryExpr: + lt := p.checkType(n.LHS) + rt := p.checkType(n.RHS) + + if !n.Op.isOperator() { + p.errorf("binary expression does not support operator %q", n.Op) + } + if (lt != ValueTypeScalar && lt != ValueTypeVector) || (rt != ValueTypeScalar && rt != ValueTypeVector) { + p.errorf("binary expression must contain only scalar and instant vector types") + } + + if (lt != ValueTypeVector || rt != ValueTypeVector) && n.VectorMatching != nil { + if len(n.VectorMatching.MatchingLabels) > 0 { + p.errorf("vector matching only allowed between instant vectors") + } + n.VectorMatching = nil + } else { + // Both operands are Vectors. 
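+			// E.g. the hypothetical query `foo and bar` reaches this branch
+			// with the default Card=CardManyToMany, which the set-operator
+			// checks below require.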
+ if n.Op.isSetOperator() { + if n.VectorMatching.Card == CardOneToMany || n.VectorMatching.Card == CardManyToOne { + p.errorf("no grouping allowed for %q operation", n.Op) + } + if n.VectorMatching.Card != CardManyToMany { + p.errorf("set operations must always be many-to-many") + } + } + } + + if (lt == ValueTypeScalar || rt == ValueTypeScalar) && n.Op.isSetOperator() { + p.errorf("set operator %q not allowed in binary scalar expression", n.Op) + } + + case *Call: + nargs := len(n.Func.ArgTypes) + if n.Func.Variadic == 0 { + if nargs != len(n.Args) { + p.errorf("expected %d argument(s) in call to %q, got %d", nargs, n.Func.Name, len(n.Args)) + } + } else { + na := nargs - 1 + if na > len(n.Args) { + p.errorf("expected at least %d argument(s) in call to %q, got %d", na, n.Func.Name, len(n.Args)) + } else if nargsmax := na + n.Func.Variadic; n.Func.Variadic > 0 && nargsmax < len(n.Args) { + p.errorf("expected at most %d argument(s) in call to %q, got %d", nargsmax, n.Func.Name, len(n.Args)) + } + } + + for i, arg := range n.Args { + if i >= len(n.Func.ArgTypes) { + i = len(n.Func.ArgTypes) - 1 + } + p.expectType(arg, n.Func.ArgTypes[i], fmt.Sprintf("call to function %q", n.Func.Name)) + } + + case *ParenExpr: + p.checkType(n.Expr) + + case *UnaryExpr: + if n.Op != itemADD && n.Op != itemSUB { + p.errorf("only + and - operators allowed for unary expressions") + } + if t := p.checkType(n.Expr); t != ValueTypeScalar && t != ValueTypeVector { + p.errorf("unary expression only allowed on expressions of type scalar or instant vector, got %q", documentedType(t)) + } + + case *NumberLiteral, *MatrixSelector, *StringLiteral, *VectorSelector: + // Nothing to do for terminals. + + default: + p.errorf("unknown node type: %T", node) + } + return +} + +func (p *parser) unquoteString(s string) string { + unquoted, err := strutil.Unquote(s) + if err != nil { + p.errorf("error unquoting string %q: %s", s, err) + } + return unquoted +} + +func parseDuration(ds string) (time.Duration, error) { + dur, err := model.ParseDuration(ds) + if err != nil { + return 0, err + } + if dur == 0 { + return 0, fmt.Errorf("duration must be greater than 0") + } + return time.Duration(dur), nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go new file mode 100644 index 000000000000..563adbe75789 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/printer.go @@ -0,0 +1,234 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" +) + +// Tree returns a string of the tree structure of the given node. 
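+// For a hypothetical query like `-some_metric` the output is roughly:
+//
+//	 |---- UnaryExpr :: -some_metric
+//	 · · · |---- VectorSelector :: some_metric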
+func Tree(node Node) string { + return tree(node, "") +} + +func tree(node Node, level string) string { + if node == nil { + return fmt.Sprintf("%s |---- %T\n", level, node) + } + typs := strings.Split(fmt.Sprintf("%T", node), ".")[1] + + var t string + // Only print the number of statements for readability. + if stmts, ok := node.(Statements); ok { + t = fmt.Sprintf("%s |---- %s :: %d\n", level, typs, len(stmts)) + } else { + t = fmt.Sprintf("%s |---- %s :: %s\n", level, typs, node) + } + + level += " · · ·" + + switch n := node.(type) { + case Statements: + for _, s := range n { + t += tree(s, level) + } + case *AlertStmt: + t += tree(n.Expr, level) + + case *EvalStmt: + t += tree(n.Expr, level) + + case *RecordStmt: + t += tree(n.Expr, level) + + case Expressions: + for _, e := range n { + t += tree(e, level) + } + case *AggregateExpr: + t += tree(n.Expr, level) + + case *BinaryExpr: + t += tree(n.LHS, level) + t += tree(n.RHS, level) + + case *Call: + t += tree(n.Args, level) + + case *ParenExpr: + t += tree(n.Expr, level) + + case *UnaryExpr: + t += tree(n.Expr, level) + + case *MatrixSelector, *NumberLiteral, *StringLiteral, *VectorSelector: + // nothing to do + + default: + panic("promql.Tree: not all node types covered") + } + return t +} + +func (stmts Statements) String() (s string) { + if len(stmts) == 0 { + return "" + } + for _, stmt := range stmts { + s += stmt.String() + s += "\n\n" + } + return s[:len(s)-2] +} + +func (node *AlertStmt) String() string { + s := fmt.Sprintf("ALERT %s", node.Name) + s += fmt.Sprintf("\n\tIF %s", node.Expr) + if node.Duration > 0 { + s += fmt.Sprintf("\n\tFOR %s", model.Duration(node.Duration)) + } + if len(node.Labels) > 0 { + s += fmt.Sprintf("\n\tLABELS %s", node.Labels) + } + if len(node.Annotations) > 0 { + s += fmt.Sprintf("\n\tANNOTATIONS %s", node.Annotations) + } + return s +} + +func (node *EvalStmt) String() string { + return "EVAL " + node.Expr.String() +} + +func (node *RecordStmt) String() string { + s := fmt.Sprintf("%s%s = %s", node.Name, node.Labels, node.Expr) + return s +} + +func (es Expressions) String() (s string) { + if len(es) == 0 { + return "" + } + for _, e := range es { + s += e.String() + s += ", " + } + return s[:len(s)-2] +} + +func (node *AggregateExpr) String() string { + aggrString := node.Op.String() + + if node.Without { + aggrString += fmt.Sprintf(" without(%s) ", strings.Join(node.Grouping, ", ")) + } else { + if len(node.Grouping) > 0 { + aggrString += fmt.Sprintf(" by(%s) ", strings.Join(node.Grouping, ", ")) + } + } + + aggrString += "(" + if node.Op.isAggregatorWithParam() { + aggrString += fmt.Sprintf("%s, ", node.Param) + } + aggrString += fmt.Sprintf("%s)", node.Expr) + + return aggrString +} + +func (node *BinaryExpr) String() string { + returnBool := "" + if node.ReturnBool { + returnBool = " bool" + } + + matching := "" + vm := node.VectorMatching + if vm != nil && (len(vm.MatchingLabels) > 0 || vm.On) { + if vm.On { + matching = fmt.Sprintf(" on(%s)", strings.Join(vm.MatchingLabels, ", ")) + } else { + matching = fmt.Sprintf(" ignoring(%s)", strings.Join(vm.MatchingLabels, ", ")) + } + if vm.Card == CardManyToOne || vm.Card == CardOneToMany { + matching += " group_" + if vm.Card == CardManyToOne { + matching += "left" + } else { + matching += "right" + } + matching += fmt.Sprintf("(%s)", strings.Join(vm.Include, ", ")) + } + } + return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) +} + +func (node *Call) String() string { + return fmt.Sprintf("%s(%s)", 
node.Func.Name, node.Args)
+}
+
+func (node *MatrixSelector) String() string {
+	vecSelector := &VectorSelector{
+		Name:          node.Name,
+		LabelMatchers: node.LabelMatchers,
+	}
+	offset := ""
+	if node.Offset != time.Duration(0) {
+		offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset))
+	}
+	return fmt.Sprintf("%s[%s]%s", vecSelector.String(), model.Duration(node.Range), offset)
+}
+
+func (node *NumberLiteral) String() string {
+	return fmt.Sprint(node.Val)
+}
+
+func (node *ParenExpr) String() string {
+	return fmt.Sprintf("(%s)", node.Expr)
+}
+
+func (node *StringLiteral) String() string {
+	return fmt.Sprintf("%q", node.Val)
+}
+
+func (node *UnaryExpr) String() string {
+	return fmt.Sprintf("%s%s", node.Op, node.Expr)
+}
+
+func (node *VectorSelector) String() string {
+	labelStrings := make([]string, 0, len(node.LabelMatchers)-1)
+	for _, matcher := range node.LabelMatchers {
+		// Only include the __name__ label if it is not an equality match.
+		if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual {
+			continue
+		}
+		labelStrings = append(labelStrings, matcher.String())
+	}
+	offset := ""
+	if node.Offset != time.Duration(0) {
+		offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset))
+	}
+
+	if len(labelStrings) == 0 {
+		return fmt.Sprintf("%s%s", node.Name, offset)
+	}
+	sort.Strings(labelStrings)
+	return fmt.Sprintf("%s{%s}%s", node.Name, strings.Join(labelStrings, ","), offset)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go
new file mode 100644
index 000000000000..28d78f4c47eb
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/quantile.go
@@ -0,0 +1,183 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promql
+
+import (
+	"math"
+	"sort"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+)
+
+// Helpers to calculate quantiles.
+
+// excludedLabels are the labels to exclude from signature calculation for
+// quantiles.
+var excludedLabels = []string{
+	labels.MetricName,
+	labels.BucketLabel,
+}
+
+type bucket struct {
+	upperBound float64
+	count      float64
+}
+
+// buckets implements sort.Interface.
+type buckets []bucket
+
+func (b buckets) Len() int           { return len(b) }
+func (b buckets) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }
+
+type metricWithBuckets struct {
+	metric  labels.Labels
+	buckets buckets
+}
+
+// bucketQuantile calculates the quantile 'q' based on the given buckets. The
+// buckets will be sorted by upperBound by this function (i.e. no sorting
+// needed before calling this function). The quantile value is interpolated
+// assuming a linear distribution within a bucket. However, if the quantile
+// falls into the highest bucket, the upper bound of the 2nd highest bucket is
+// returned. A natural lower bound of 0 is assumed if the upper bound of the
+// lowest bucket is greater than 0. In that case, interpolation in the lowest
+// bucket happens linearly between 0 and the upper bound of the lowest bucket.
+// However, if the lowest bucket has an upper bound less than or equal to 0,
+// this upper bound is returned if the quantile falls into the lowest bucket.
+//
+// There are a number of special cases (once we have a way to report errors
+// happening during evaluations of AST functions, we should report those
+// explicitly):
+//
+// If 'buckets' has fewer than 2 elements, NaN is returned.
+//
+// If the highest bucket is not +Inf, NaN is returned.
+//
+// If q<0, -Inf is returned.
+//
+// If q>1, +Inf is returned.
+func bucketQuantile(q float64, buckets buckets) float64 {
+	if q < 0 {
+		return math.Inf(-1)
+	}
+	if q > 1 {
+		return math.Inf(+1)
+	}
+	if len(buckets) < 2 {
+		return math.NaN()
+	}
+	sort.Sort(buckets)
+	if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
+		return math.NaN()
+	}
+
+	ensureMonotonic(buckets)
+
+	rank := q * buckets[len(buckets)-1].count
+	b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })
+
+	if b == len(buckets)-1 {
+		return buckets[len(buckets)-2].upperBound
+	}
+	if b == 0 && buckets[0].upperBound <= 0 {
+		return buckets[0].upperBound
+	}
+	var (
+		bucketStart float64
+		bucketEnd   = buckets[b].upperBound
+		count       = buckets[b].count
+	)
+	if b > 0 {
+		bucketStart = buckets[b-1].upperBound
+		count -= buckets[b-1].count
+		rank -= buckets[b-1].count
+	}
+	return bucketStart + (bucketEnd-bucketStart)*(rank/count)
+}
+
+// The assumption that bucket counts increase monotonically with increasing
+// upperBound may be violated during:
+//
+//   * Recording rule evaluation of histogram_quantile, especially when rate()
+//     has been applied to the underlying bucket timeseries.
+//   * Evaluation of histogram_quantile computed over federated bucket
+//     timeseries, especially when rate() has been applied.
+//
+// This is because scraped data is not made available to rule evaluation or
+// federation atomically, so some buckets are computed with data from the
+// most recent scrapes, but the other buckets are missing data from the most
+// recent scrape.
+//
+// Monotonicity is usually guaranteed because if a bucket with upper bound
+// u1 has count c1, then any bucket with a higher upper bound u > u1 must
+// have counted all c1 observations and perhaps more, so that c >= c1.
+//
+// Randomly interspersed partial sampling breaks that guarantee, and rate()
+// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
+// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
+// monotonicity is broken. It is exacerbated by rate() because under normal
+// operation, cumulative counting of buckets will cause the bucket counts to
+// diverge such that small differences from missing samples are not a problem.
+// rate() removes this divergence.
+//
+// bucketQuantile depends on that monotonicity to do a binary search for the
+// bucket with the φ-quantile count, so breaking the monotonicity
+// guarantee causes bucketQuantile() to return undefined (nonsense) results.
+//
+// As a somewhat hacky solution until ingestion is atomic per scrape, we
+// calculate the "envelope" of the histogram buckets, essentially removing
+// any decreases in the count between successive buckets.
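+// For instance, hypothetical cumulative counts (10, 7, 12) over increasing
+// upper bounds are flattened to (10, 10, 12) by ensureMonotonic below.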
+
+func ensureMonotonic(buckets buckets) {
+	max := buckets[0].count
+	for i := range buckets[1:] {
+		switch {
+		case buckets[i+1].count > max:
+			max = buckets[i+1].count
+		case buckets[i+1].count < max:
+			buckets[i+1].count = max
+		}
+	}
+}
+
+// quantile calculates the given quantile of a vector of samples.
+//
+// The Vector will be sorted.
+// If 'values' has zero elements, NaN is returned.
+// If q<0, -Inf is returned.
+// If q>1, +Inf is returned.
+func quantile(q float64, values vectorByValueHeap) float64 {
+	if len(values) == 0 {
+		return math.NaN()
+	}
+	if q < 0 {
+		return math.Inf(-1)
+	}
+	if q > 1 {
+		return math.Inf(+1)
+	}
+	sort.Sort(values)
+
+	n := float64(len(values))
+	// When the quantile lies between two samples,
+	// we use a weighted average of the two samples.
+	rank := q * (n - 1)
+
+	lowerIndex := math.Max(0, math.Floor(rank))
+	upperIndex := math.Min(n-1, lowerIndex+1)
+
+	weight := rank - math.Floor(rank)
+	return values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
new file mode 100644
index 000000000000..cd74b7c9b0fa
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/test.go
@@ -0,0 +1,587 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//nolint //Since this was copied from Prometheus leave it as is
+package promql
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/storage/tsdb"
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+var (
+	minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
+
+	patSpace       = regexp.MustCompile("[\t ]+")
+	patLoad        = regexp.MustCompile(`^load\s+(.+?)$`)
+	patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
+)
+
+const (
+	epsilon = 0.000001 // Relative error allowed for sample values.
+)
+
+var testStartTime = time.Unix(0, 0)
+
+// Test is a sequence of read and write commands that are run
+// against a test storage.
+type Test struct {
+	testutil.T
+
+	cmds []testCommand
+
+	storage storage.Storage
+
+	queryEngine *Engine
+	context     context.Context
+	cancelCtx   context.CancelFunc
+}
+
+// NewTest returns an initialized empty Test.
+func NewTest(t testutil.T, input string) (*Test, error) {
+	test := &Test{
+		T:    t,
+		cmds: []testCommand{},
+	}
+	err := test.parse(input)
+	test.clear()
+
+	return test, err
+}
+
+func newTestFromFile(t testutil.T, filename string) (*Test, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return NewTest(t, string(content))
+}
+
+// QueryEngine returns the test's query engine.
+func (t *Test) QueryEngine() *Engine {
+	return t.queryEngine
+}
+
+// Queryable allows querying the test data.
+func (t *Test) Queryable() storage.Queryable {
+	return t.storage
+}
+
+// Context returns the test's context.
+func (t *Test) Context() context.Context {
+	return t.context
+}
+
+// Storage returns the test's storage.
+func (t *Test) Storage() storage.Storage {
+	return t.storage
+}
+
+func raise(line int, format string, v ...interface{}) error {
+	return &ParseErr{
+		Line: line + 1,
+		Err:  fmt.Errorf(format, v...),
+	}
+}
+
+func (t *Test) parseLoad(lines []string, i int) (int, *loadCmd, error) {
+	if !patLoad.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid load command. (load <step:duration>)")
+	}
+	parts := patLoad.FindStringSubmatch(lines[i])
+
+	gap, err := model.ParseDuration(parts[1])
+	if err != nil {
+		return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
+	}
+	cmd := newLoadCmd(time.Duration(gap))
+	for i+1 < len(lines) {
+		i++
+		defLine := lines[i]
+		if len(defLine) == 0 {
+			i--
+			break
+		}
+		metric, vals, err := parseSeriesDesc(defLine)
+		if err != nil {
+			if perr, ok := err.(*ParseErr); ok {
+				perr.Line = i + 1
+			}
+			return i, nil, err
+		}
+		cmd.set(metric, vals...)
+	}
+	return i, cmd, nil
+}
+
+func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
+	if !patEvalInstant.MatchString(lines[i]) {
+		return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>)")
+	}
+	parts := patEvalInstant.FindStringSubmatch(lines[i])
+	var (
+		mod  = parts[1]
+		at   = parts[2]
+		expr = parts[3]
+	)
+	_, err := ParseExpr(expr)
+	if err != nil {
+		if perr, ok := err.(*ParseErr); ok {
+			perr.Line = i + 1
+			perr.Pos += strings.Index(lines[i], expr)
+		}
+		return i, nil, err
+	}
+
+	offset, err := model.ParseDuration(at)
+	if err != nil {
+		return i, nil, raise(i, "invalid step definition %q: %s", at, err)
+	}
+	ts := testStartTime.Add(time.Duration(offset))
+
+	cmd := newEvalCmd(expr, ts, i+1)
+	switch mod {
+	case "ordered":
+		cmd.ordered = true
+	case "fail":
+		cmd.fail = true
+	}
+
+	for j := 1; i+1 < len(lines); j++ {
+		i++
+		defLine := lines[i]
+		if len(defLine) == 0 {
+			i--
+			break
+		}
+		if f, err := parseNumber(defLine); err == nil {
+			cmd.expect(0, nil, sequenceValue{value: f})
+			break
+		}
+		metric, vals, err := parseSeriesDesc(defLine)
+		if err != nil {
+			if perr, ok := err.(*ParseErr); ok {
+				perr.Line = i + 1
+			}
+			return i, nil, err
+		}
+
+		// Currently, we are not expecting any matrices.
+		if len(vals) > 1 {
+			return i, nil, raise(i, "expecting multiple values in instant evaluation is not allowed")
+		}
+		cmd.expect(j, metric, vals...)
+	}
+	return i, cmd, nil
+}
+
+// parse parses the given command sequence and appends it to the test.
+func (t *Test) parse(input string) error {
+	// Trim lines and remove comments.
+	lines := strings.Split(input, "\n")
+	for i, l := range lines {
+		l = strings.TrimSpace(l)
+		if strings.HasPrefix(l, "#") {
+			l = ""
+		}
+		lines[i] = l
+	}
+	var err error
+
+	// Scan for steps line by line.
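+	// For reference, a minimal hypothetical script accepted here is:
+	//
+	//	load 5m
+	//	    some_metric{job="a"} 0+10x10
+	//
+	//	eval instant at 10m some_metric{job="a"}
+	//	    some_metric{job="a"} 20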
+ for i := 0; i < len(lines); i++ { + l := lines[i] + if len(l) == 0 { + continue + } + var cmd testCommand + + switch c := strings.ToLower(patSpace.Split(l, 2)[0]); { + case c == "clear": + cmd = &clearCmd{} + case c == "load": + i, cmd, err = t.parseLoad(lines, i) + case strings.HasPrefix(c, "eval"): + i, cmd, err = t.parseEval(lines, i) + default: + return raise(i, "invalid command %q", l) + } + if err != nil { + return err + } + t.cmds = append(t.cmds, cmd) + } + return nil +} + +// testCommand is an interface that ensures that only the package internal +// types can be a valid command for a test. +type testCommand interface { + testCmd() +} + +func (*clearCmd) testCmd() {} +func (*loadCmd) testCmd() {} +func (*evalCmd) testCmd() {} + +// loadCmd is a command that loads sequences of sample values for specific +// metrics into the storage. +type loadCmd struct { + gap time.Duration + metrics map[uint64]labels.Labels + defs map[uint64][]Point +} + +func newLoadCmd(gap time.Duration) *loadCmd { + return &loadCmd{ + gap: gap, + metrics: map[uint64]labels.Labels{}, + defs: map[uint64][]Point{}, + } +} + +func (cmd loadCmd) String() string { + return "load" +} + +// set a sequence of sample values for the given metric. +func (cmd *loadCmd) set(m labels.Labels, vals ...sequenceValue) { + h := m.Hash() + + samples := make([]Point, 0, len(vals)) + ts := testStartTime + for _, v := range vals { + if !v.omitted { + samples = append(samples, Point{ + T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond), + V: v.value, + }) + } + ts = ts.Add(cmd.gap) + } + cmd.defs[h] = samples + cmd.metrics[h] = m +} + +// append the defined time series to the storage. +func (cmd *loadCmd) append(a storage.Appender) error { + for h, smpls := range cmd.defs { + m := cmd.metrics[h] + + for _, s := range smpls { + if _, err := a.Add(m, s.T, s.V); err != nil { + return err + } + } + } + return nil +} + +// evalCmd is a command that evaluates an expression for the given time (range) +// and expects a specific result. +type evalCmd struct { + expr string + start time.Time + line int + + fail, ordered bool + + metrics map[uint64]labels.Labels + expected map[uint64]entry +} + +type entry struct { + pos int + vals []sequenceValue +} + +func (e entry) String() string { + return fmt.Sprintf("%d: %s", e.pos, e.vals) +} + +func newEvalCmd(expr string, start time.Time, line int) *evalCmd { + return &evalCmd{ + expr: expr, + start: start, + line: line, + + metrics: map[uint64]labels.Labels{}, + expected: map[uint64]entry{}, + } +} + +func (ev *evalCmd) String() string { + return "eval" +} + +// expect adds a new metric with a sequence of values to the set of expected +// results for the query. +func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...sequenceValue) { + if m == nil { + ev.expected[0] = entry{pos: pos, vals: vals} + return + } + h := m.Hash() + ev.metrics[h] = m + ev.expected[h] = entry{pos: pos, vals: vals} +} + +// compareResult compares the result value with the defined expectation. 
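+// In ordered mode the position of each sample in the result vector is also
+// compared against the position at which the expectation was declared.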
+func (ev *evalCmd) compareResult(result Value) error {
+	switch val := result.(type) {
+	case Matrix:
+		return fmt.Errorf("received range result on instant evaluation")
+
+	case Vector:
+		seen := map[uint64]bool{}
+		for pos, v := range val {
+			fp := v.Metric.Hash()
+			if _, ok := ev.metrics[fp]; !ok {
+				return fmt.Errorf("unexpected metric %s in result", v.Metric)
+			}
+			exp := ev.expected[fp]
+			if ev.ordered && exp.pos != pos+1 {
+				return fmt.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
+			}
+			if !almostEqual(exp.vals[0].value, v.V) {
+				return fmt.Errorf("expected %v for %s but got %v", exp.vals[0].value, v.Metric, v.V)
+			}
+
+			seen[fp] = true
+		}
+		for fp, expVals := range ev.expected {
+			if !seen[fp] {
+				fmt.Println("vector result", len(val), ev.expr)
+				for _, ss := range val {
+					fmt.Println("    ", ss.Metric, ss.Point)
+				}
+				return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
+			}
+		}
+
+	case Scalar:
+		if !almostEqual(ev.expected[0].vals[0].value, val.V) {
+			return fmt.Errorf("expected Scalar %v but got %v", ev.expected[0].vals[0].value, val.V)
+		}
+
+	default:
+		panic(fmt.Errorf("promql.Test.compareResult: unexpected result type %T", result))
+	}
+	return nil
+}
+
+// clearCmd is a command that wipes the test's storage state.
+type clearCmd struct{}
+
+func (cmd clearCmd) String() string {
+	return "clear"
+}
+
+// Run executes the command sequence of the test. Until the maximum error number
+// is reached, evaluation errors do not terminate execution.
+func (t *Test) Run() error {
+	for _, cmd := range t.cmds {
+		err := t.exec(cmd)
+		// TODO(fabxc): aggregate command errors, yield diffs for result
+		// comparison errors.
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// exec processes a single step of the test.
+func (t *Test) exec(tc testCommand) error {
+	switch cmd := tc.(type) {
+	case *clearCmd:
+		t.clear()
+
+	case *loadCmd:
+		app, err := t.storage.Appender()
+		if err != nil {
+			return err
+		}
+		if err := cmd.append(app); err != nil {
+			app.Rollback()
+			return err
+		}
+
+		if err := app.Commit(); err != nil {
+			return err
+		}
+
+	case *evalCmd:
+		q, _ := t.queryEngine.NewInstantQuery(t.storage, cmd.expr, cmd.start)
+		res := q.Exec(t.context)
+		if res.Err != nil {
+			if cmd.fail {
+				return nil
+			}
+			return fmt.Errorf("error evaluating query %q (line %d): %s", cmd.expr, cmd.line, res.Err)
+		}
+		defer q.Close()
+		if res.Err == nil && cmd.fail {
+			return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
+		}
+
+		err := cmd.compareResult(res.Value)
+		if err != nil {
+			return fmt.Errorf("error in %s %s: %s", cmd, cmd.expr, err)
+		}
+
+		// Check that the query returns the same result in range mode,
+		// by checking against the middle step.
+		q, _ = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute)
+		rangeRes := q.Exec(t.context)
+		if rangeRes.Err != nil {
+			return fmt.Errorf("error evaluating query %q (line %d) in range mode: %s", cmd.expr, cmd.line, rangeRes.Err)
+		}
+		defer q.Close()
+		if cmd.ordered {
+			// Ordering isn't defined for range queries.
+			return nil
+		}
+		mat := rangeRes.Value.(Matrix)
+		vec := make(Vector, 0, len(mat))
+		for _, series := range mat {
+			for _, point := range series.Points {
+				if point.T == timeMilliseconds(cmd.start) {
+					vec = append(vec, Sample{Metric: series.Metric, Point: point})
+					break
+				}
+			}
+		}
+		if _, ok := res.Value.(Scalar); ok {
+			err = cmd.compareResult(Scalar{V: vec[0].Point.V})
+		} else {
+			err = cmd.compareResult(vec)
+		}
+		if err != nil {
+			return fmt.Errorf("error in %s %s (line %d) range mode: %s", cmd, cmd.expr, cmd.line, err)
+		}
+
+	default:
+		panic("promql.Test.exec: unknown test command type")
+	}
+	return nil
+}
+
+// clear the current test storage of all inserted samples.
+func (t *Test) clear() {
+	if t.storage != nil {
+		if err := t.storage.Close(); err != nil {
+			t.T.Fatalf("closing test storage: %s", err)
+		}
+	}
+	if t.cancelCtx != nil {
+		t.cancelCtx()
+	}
+	t.storage = NewStorage(t)
+
+	t.queryEngine = NewEngine(nil, nil, 20, 10*time.Second)
+	t.context, t.cancelCtx = context.WithCancel(context.Background())
+}
+
+// Close closes resources associated with the Test.
+func (t *Test) Close() {
+	t.cancelCtx()
+
+	if err := t.storage.Close(); err != nil {
+		t.T.Fatalf("closing test storage: %s", err)
+	}
+}
+
+// almostEqual returns true if the two sample values only differ by a
+// small relative error.
+func almostEqual(a, b float64) bool {
+	// NaN has no equality but for testing we still want to know whether both values
+	// are NaN.
+	if math.IsNaN(a) && math.IsNaN(b) {
+		return true
+	}
+
+	// Cf. http://floating-point-gui.de/errors/comparison/
+	if a == b {
+		return true
+	}
+
+	diff := math.Abs(a - b)
+
+	if a == 0 || b == 0 || diff < minNormal {
+		return diff < epsilon*minNormal
+	}
+	return diff/(math.Abs(a)+math.Abs(b)) < epsilon
+}
+
+func parseNumber(s string) (float64, error) {
+	n, err := strconv.ParseInt(s, 0, 64)
+	f := float64(n)
+	if err != nil {
+		f, err = strconv.ParseFloat(s, 64)
+	}
+	if err != nil {
+		return 0, fmt.Errorf("error parsing number: %s", err)
+	}
+	return f, nil
+}
+
+type T interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+}
+
+// NewStorage returns a new storage for testing purposes
+// that removes all associated files on closing.
+func NewStorage(t T) storage.Storage {
+	dir, err := ioutil.TempDir("", "test_storage")
+	if err != nil {
+		t.Fatalf("Opening test dir failed: %s", err)
+	}
+
+	// Tests just load data for a series sequentially. Thus we
+	// need a long appendable window.
+	db, err := tsdb.Open(dir, nil, nil, &tsdb.Options{
+		MinBlockDuration: model.Duration(24 * time.Hour),
+		MaxBlockDuration: model.Duration(24 * time.Hour),
+	})
+	if err != nil {
+		t.Fatalf("Opening test storage failed: %s", err)
+	}
+	return testStorage{Storage: tsdb.Adapter(db, int64(0)), dir: dir}
+}
+
+type testStorage struct {
+	storage.Storage
+	dir string
+}
+
+func (s testStorage) Close() error {
+	if err := s.Storage.Close(); err != nil {
+		return err
+	}
+	return os.RemoveAll(s.dir)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go
new file mode 100644
index 000000000000..fe902cd23a3c
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/legacy_promql/value.go
@@ -0,0 +1,216 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/prometheus/pkg/labels" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValueTypeMatrix } +func (Vector) Type() ValueType { return ValueTypeVector } +func (Scalar) Type() ValueType { return ValueTypeScalar } +func (String) Type() ValueType { return ValueTypeString } + +// ValueType describes a type of a value. +type ValueType string + +// The valid value types. +const ( + ValueTypeNone = "none" + ValueTypeVector = "vector" + ValueTypeScalar = "scalar" + ValueTypeMatrix = "matrix" + ValueTypeString = "string" +) + +// String represents a string value. +type String struct { + V string + T int64 +} + +func (s String) String() string { + return s.V +} + +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V}) +} + +// Scalar is a data point that's explicitly not associated with a metric. +type Scalar struct { + T int64 + V float64 +} + +func (s Scalar) String() string { + v := strconv.FormatFloat(s.V, 'f', -1, 64) + return fmt.Sprintf("scalar: %v @[%v]", v, s.T) +} + +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(s.V, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(s.T) / 1000, v}) +} + +// Series is a stream of data points belonging to a metric. +type Series struct { + Metric labels.Labels `json:"metric"` + Points []Point `json:"values"` +} + +func (s Series) String() string { + vals := make([]string, len(s.Points)) + for i, v := range s.Points { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n")) +} + +// Point represents a single data point for a given timestamp. +type Point struct { + T int64 + V float64 +} + +func (p Point) String() string { + v := strconv.FormatFloat(p.V, 'f', -1, 64) + return fmt.Sprintf("%v @[%v]", v, p.T) +} + +// MarshalJSON implements json.Marshaler. +func (p Point) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(p.V, 'f', -1, 64) + return json.Marshal([...]interface{}{float64(p.T) / 1000, v}) +} + +// Sample is a single sample belonging to a metric. +type Sample struct { + Point + + Metric labels.Labels +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, s.Point) +} + +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + M labels.Labels `json:"metric"` + V Point `json:"value"` + }{ + M: s.Metric, + V: s.Point, + } + return json.Marshal(v) +} + +// Vector is basically only an alias for model.Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +// Matrix is a slice of Seriess that implements sort.Interface and +// has a String method. 
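+// Each series renders as its metric followed by one "value @[timestamp]"
+// line per point, with series separated by newlines (see Series.String).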
+type Matrix []Series
+
+func (m Matrix) String() string {
+	// TODO(fabxc): sort, or can we rely on order from the querier?
+	strs := make([]string, len(m))
+
+	for i, ss := range m {
+		strs[i] = ss.String()
+	}
+
+	return strings.Join(strs, "\n")
+}
+
+func (m Matrix) Len() int           { return len(m) }
+func (m Matrix) Less(i, j int) bool { return labels.Compare(m[i].Metric, m[j].Metric) < 0 }
+func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
+
+// Result holds the resulting value of an execution or an error
+// if any occurred.
+type Result struct {
+	Err   error
+	Value Value
+}
+
+// Vector returns a Vector if the result value is one. An error is returned if
+// the result was an error or the result value is not a Vector.
+func (r *Result) Vector() (Vector, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	v, ok := r.Value.(Vector)
+	if !ok {
+		return nil, fmt.Errorf("query result is not a Vector")
+	}
+	return v, nil
+}
+
+// Matrix returns a Matrix. An error is returned if
+// the result was an error or the result value is not a Matrix.
+func (r *Result) Matrix() (Matrix, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	v, ok := r.Value.(Matrix)
+	if !ok {
+		return nil, fmt.Errorf("query result is not a range Vector")
+	}
+	return v, nil
+}
+
+// Scalar returns a Scalar value. An error is returned if
+// the result was an error or the result value is not a Scalar.
+func (r *Result) Scalar() (Scalar, error) {
+	if r.Err != nil {
+		return Scalar{}, r.Err
+	}
+	v, ok := r.Value.(Scalar)
+	if !ok {
+		return Scalar{}, fmt.Errorf("query result is not a Scalar")
+	}
+	return v, nil
+}
+
+func (r *Result) String() string {
+	if r.Err != nil {
+		return r.Err.Error()
+	}
+	if r.Value == nil {
+		return ""
+	}
+	return r.Value.String()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go
new file mode 100644
index 000000000000..178ed5e47428
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go
@@ -0,0 +1,453 @@
+package userconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"gopkg.in/yaml.v2"
+
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/rules"
+
+	legacy_promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql"
+	legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// An ID is the ID of a single user's Cortex configuration. When a
+// configuration changes, it gets a new ID.
+type ID int
+
+// RuleFormatVersion indicates which Prometheus rule format (v1 vs. v2) to use in parsing.
+type RuleFormatVersion int
+
+const (
+	// RuleFormatV1 is the Prometheus 1.x rule format.
+	RuleFormatV1 RuleFormatVersion = iota
+	// RuleFormatV2 is the Prometheus 2.x rule format.
+	RuleFormatV2 RuleFormatVersion = iota
+)
+
+// IsValid returns whether the rules format version is a valid (known) version.
+func (v RuleFormatVersion) IsValid() bool {
+	switch v {
+	case RuleFormatV1, RuleFormatV2:
+		return true
+	default:
+		return false
+	}
+}
+
+// MarshalJSON implements json.Marshaler.
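+// The version serializes as the JSON string "1" or "2"; for example,
+// json.Marshal(RuleFormatV2) yields `"2"` (illustrative).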
+func (v RuleFormatVersion) MarshalJSON() ([]byte, error) { + switch v { + case RuleFormatV1: + return json.Marshal("1") + case RuleFormatV2: + return json.Marshal("2") + default: + return nil, fmt.Errorf("unknown rule format version %d", v) + } +} + +// MarshalYAML implements yaml.Marshaler. +func (v RuleFormatVersion) MarshalYAML() (interface{}, error) { + switch v { + case RuleFormatV1: + return yaml.Marshal("1") + case RuleFormatV2: + return yaml.Marshal("2") + default: + return nil, fmt.Errorf("unknown rule format version %d", v) + } +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *RuleFormatVersion) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + switch s { + case "1": + *v = RuleFormatV1 + case "2": + *v = RuleFormatV2 + default: + return fmt.Errorf("unknown rule format version %q", string(data)) + } + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (v *RuleFormatVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + switch s { + case "1": + *v = RuleFormatV1 + case "2": + *v = RuleFormatV2 + default: + return fmt.Errorf("unknown rule format version %q", s) + } + return nil +} + +// A Config is a Cortex configuration for a single user. +type Config struct { + // RulesFiles maps from a rules filename to file contents. + RulesConfig RulesConfig + TemplateFiles map[string]string + AlertmanagerConfig string +} + +// configCompat is a compatibility struct to support old JSON config blobs +// saved in the config DB that didn't have a rule format version yet and +// just had a top-level field for the rule files. +type configCompat struct { + RulesFiles map[string]string `json:"rules_files" yaml:"rules_files"` + RuleFormatVersion RuleFormatVersion `json:"rule_format_version" yaml:"rule_format_version"` + TemplateFiles map[string]string `json:"template_files" yaml:"template_files"` + AlertmanagerConfig string `json:"alertmanager_config" yaml:"alertmanager_config"` +} + +// MarshalJSON implements json.Marshaler. +func (c Config) MarshalJSON() ([]byte, error) { + compat := &configCompat{ + RulesFiles: c.RulesConfig.Files, + RuleFormatVersion: c.RulesConfig.FormatVersion, + TemplateFiles: c.TemplateFiles, + AlertmanagerConfig: c.AlertmanagerConfig, + } + + return json.Marshal(compat) +} + +// MarshalYAML implements yaml.Marshaler. +func (c Config) MarshalYAML() (interface{}, error) { + compat := &configCompat{ + RulesFiles: c.RulesConfig.Files, + RuleFormatVersion: c.RulesConfig.FormatVersion, + TemplateFiles: c.TemplateFiles, + AlertmanagerConfig: c.AlertmanagerConfig, + } + + return yaml.Marshal(compat) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (c *Config) UnmarshalJSON(data []byte) error { + compat := configCompat{} + if err := json.Unmarshal(data, &compat); err != nil { + return err + } + *c = Config{ + RulesConfig: RulesConfig{ + Files: compat.RulesFiles, + FormatVersion: compat.RuleFormatVersion, + }, + TemplateFiles: compat.TemplateFiles, + AlertmanagerConfig: compat.AlertmanagerConfig, + } + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	compat := configCompat{}
+	if err := unmarshal(&compat); err != nil {
+		return errors.WithStack(err)
+	}
+	*c = Config{
+		RulesConfig: RulesConfig{
+			Files:         compat.RulesFiles,
+			FormatVersion: compat.RuleFormatVersion,
+		},
+		TemplateFiles:      compat.TemplateFiles,
+		AlertmanagerConfig: compat.AlertmanagerConfig,
+	}
+	return nil
+}
+
+// View is what's returned from the Weave Cloud configs service
+// when we ask for all Cortex configurations.
+//
+// The configs service is essentially a JSON blob store that gives each
+// _version_ of a configuration a unique ID and guarantees that later versions
+// have greater IDs.
+type View struct {
+	ID        ID        `json:"id"`
+	Config    Config    `json:"config"`
+	DeletedAt time.Time `json:"deleted_at"`
+}
+
+// IsDeleted tells you if the config is deleted.
+func (v View) IsDeleted() bool {
+	return !v.DeletedAt.IsZero()
+}
+
+// GetVersionedRulesConfig specializes the view to just the rules config.
+func (v View) GetVersionedRulesConfig() *VersionedRulesConfig {
+	if v.Config.RulesConfig.Files == nil {
+		return nil
+	}
+	return &VersionedRulesConfig{
+		ID:        v.ID,
+		Config:    v.Config.RulesConfig,
+		DeletedAt: v.DeletedAt,
+	}
+}
+
+// RulesConfig is the rules configuration for a particular organization.
+type RulesConfig struct {
+	FormatVersion RuleFormatVersion `json:"format_version"`
+	Files         map[string]string `json:"files"`
+}
+
+// Equal compares two RulesConfigs for equality.
+//
+// instance Eq RulesConfig
+func (c RulesConfig) Equal(o RulesConfig) bool {
+	if c.FormatVersion != o.FormatVersion {
+		return false
+	}
+	if len(o.Files) != len(c.Files) {
+		return false
+	}
+	for k, v1 := range c.Files {
+		v2, ok := o.Files[k]
+		if !ok || v1 != v2 {
+			return false
+		}
+	}
+	return true
+}
+
+// Parse parses and validates the content of the rule files in a RulesConfig
+// according to the passed rule format version.
+func (c RulesConfig) Parse() (map[string][]rules.Rule, error) {
+	switch c.FormatVersion {
+	case RuleFormatV1:
+		return c.parseV1()
+	case RuleFormatV2:
+		return c.parseV2()
+	default:
+		return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion)
+	}
+}
+
+// ParseFormatted returns the rulefmt map of a user's rules configs. It allows
+// for rules to be mapped to disk and read by the prometheus rules manager.
+func (c RulesConfig) ParseFormatted() (map[string]legacy_rulefmt.RuleGroups, error) {
+	switch c.FormatVersion {
+	case RuleFormatV1:
+		return c.parseV1Formatted()
+	case RuleFormatV2:
+		return c.parseV2Formatted()
+	default:
+		return nil, fmt.Errorf("unknown rule format version %v", c.FormatVersion)
+	}
+}
+
+// parseV2Formatted parses and validates the content of the rule files in a
+// RulesConfig according to the Prometheus 2.x rule format.
+func (c RulesConfig) parseV2Formatted() (map[string]legacy_rulefmt.RuleGroups, error) {
+	ruleMap := map[string]legacy_rulefmt.RuleGroups{}
+
+	for fn, content := range c.Files {
+		rgs, errs := legacy_rulefmt.Parse([]byte(content))
+		for _, err := range errs { // return just the first error, if any
+			return nil, err
+		}
+		ruleMap[fn] = *rgs
+	}
+	return ruleMap, nil
+}
+
+// parseV1Formatted parses and validates the content of the rule files in a
+// RulesConfig according to the Prometheus 1.x rule format.
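+// A hypothetical v1 rules file handled here looks like:
+//
+//	job:http_requests:rate5m = sum(rate(http_requests_total[5m])) by (job)
+//
+//	ALERT HighErrors
+//	  IF job:http_requests:rate5m > 100
+//	  FOR 10m
+//	  LABELS { severity = "page" }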
+func (c RulesConfig) parseV1Formatted() (map[string]legacy_rulefmt.RuleGroups, error) {
+	result := map[string]legacy_rulefmt.RuleGroups{}
+	for fn, content := range c.Files {
+		stmts, err := legacy_promql.ParseStmts(content)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing %s: %s", fn, err)
+		}
+
+		ra := []legacy_rulefmt.Rule{}
+		for _, stmt := range stmts {
+			var rule legacy_rulefmt.Rule
+			switch r := stmt.(type) {
+			case *legacy_promql.AlertStmt:
+				_, err := promql.ParseExpr(r.Expr.String())
+				if err != nil {
+					return nil, err
+				}
+
+				rule = legacy_rulefmt.Rule{
+					Alert:       r.Name,
+					Expr:        r.Expr.String(),
+					For:         model.Duration(r.Duration),
+					Labels:      r.Labels.Map(),
+					Annotations: r.Annotations.Map(),
+				}
+
+			case *legacy_promql.RecordStmt:
+				_, err := promql.ParseExpr(r.Expr.String())
+				if err != nil {
+					return nil, err
+				}
+
+				rule = legacy_rulefmt.Rule{
+					Record: r.Name,
+					Expr:   r.Expr.String(),
+					Labels: r.Labels.Map(),
+				}
+
+			default:
+				return nil, fmt.Errorf("ruler.GetRules: unknown statement type")
+			}
+			ra = append(ra, rule)
+		}
+		result[fn] = legacy_rulefmt.RuleGroups{
+			Groups: []legacy_rulefmt.RuleGroup{
+				{
+					Name:  "rg:" + fn,
+					Rules: ra,
+				},
+			},
+		}
+	}
+	return result, nil
+}
+
+// parseV2 parses and validates the content of the rule files in a RulesConfig
+// according to the Prometheus 2.x rule format.
+//
+// NOTE: On one hand, we cannot return fully-fledged lists of rules.Group
+// here yet, as creating a rules.Group requires already
+// passing in rules.ManagerOptions options (which in turn require a
+// notifier, appender, etc.), which we do not want to create simply
+// for parsing. On the other hand, we should not return barebones
+// rulefmt.RuleGroup sets here either, as only a fully-converted rules.Rule
+// is able to track alert states over multiple rule evaluations. The caller
+// would otherwise have to ensure that the rulefmt.RuleGroup is converted
+// exactly once, not for every evaluation (or risk losing alert pending
+// states). So it's probably better to just return a set of rules.Rule here.
+func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) {
+	groups := map[string][]rules.Rule{}
+
+	for fn, content := range c.Files {
+		rgs, errs := legacy_rulefmt.Parse([]byte(content))
+		if len(errs) > 0 {
+			return nil, fmt.Errorf("error parsing %s: %v", fn, errs[0])
+		}
+
+		for _, rg := range rgs.Groups {
+			rls := make([]rules.Rule, 0, len(rg.Rules))
+			for _, rl := range rg.Rules {
+				expr, err := promql.ParseExpr(rl.Expr)
+				if err != nil {
+					return nil, err
+				}
+
+				if rl.Alert != "" {
+					rls = append(rls, rules.NewAlertingRule(
+						rl.Alert,
+						expr,
+						time.Duration(rl.For),
+						labels.FromMap(rl.Labels),
+						labels.FromMap(rl.Annotations),
+						nil,
+						true,
+						log.With(util.Logger, "alert", rl.Alert),
+					))
+					continue
+				}
+				rls = append(rls, rules.NewRecordingRule(
+					rl.Record,
+					expr,
+					labels.FromMap(rl.Labels),
+				))
+			}
+
+			// Group names have to be unique in Prometheus, but only within one rules file.
+			groups[rg.Name+";"+fn] = rls
+		}
+	}
+
+	return groups, nil
+}
+
+// parseV1 parses and validates the content of the rule files in a RulesConfig
+// according to the Prometheus 1.x rule format.
+//
+// The same comment about rule groups as on parseV2() applies here.
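+//
+// For reference, a 1.x rules file is plain text with one statement per rule,
+// for example (the names and expressions here are illustrative):
+//
+//	job:requests:rate5m = sum(rate(requests_total[5m])) by (job)
+//	ALERT InstanceDown IF up == 0 FOR 5m LABELS { severity = "page" }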
+func (c RulesConfig) parseV1() (map[string][]rules.Rule, error) {
+	result := map[string][]rules.Rule{}
+	for fn, content := range c.Files {
+		stmts, err := legacy_promql.ParseStmts(content)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing %s: %s", fn, err)
+		}
+		ra := []rules.Rule{}
+		for _, stmt := range stmts {
+			var rule rules.Rule
+
+			switch r := stmt.(type) {
+			case *legacy_promql.AlertStmt:
+				// legacy_promql.ParseStmts has parsed the whole rule for us.
+				// Ideally we'd just use r.Expr and pass that to rules.NewAlertingRule,
+				// but it is of the type legacy_promql.Expr and not promql.Expr.
+				// So we convert it back to a string, and then parse it again with the
+				// upstream parser to get it into the right type.
+				expr, err := promql.ParseExpr(r.Expr.String())
+				if err != nil {
+					return nil, err
+				}
+
+				rule = rules.NewAlertingRule(
+					r.Name, expr, r.Duration, r.Labels, r.Annotations, nil, true,
+					log.With(util.Logger, "alert", r.Name),
+				)
+
+			case *legacy_promql.RecordStmt:
+				expr, err := promql.ParseExpr(r.Expr.String())
+				if err != nil {
+					return nil, err
+				}
+
+				rule = rules.NewRecordingRule(r.Name, expr, r.Labels)
+
+			default:
+				return nil, fmt.Errorf("ruler.GetRules: unknown statement type")
+			}
+			ra = append(ra, rule)
+		}
+		result[fn] = ra
+	}
+	return result, nil
+}
+
+// VersionedRulesConfig is a RulesConfig together with a version.
+// `data Versioned a = Versioned { id :: ID , config :: a }`
+type VersionedRulesConfig struct {
+	ID        ID          `json:"id"`
+	Config    RulesConfig `json:"config"`
+	DeletedAt time.Time   `json:"deleted_at"`
+}
+
+// IsDeleted tells you if the config is deleted.
+func (vr VersionedRulesConfig) IsDeleted() bool {
+	return !vr.DeletedAt.IsZero()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go
new file mode 100644
index 000000000000..d441967e1f52
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go
@@ -0,0 +1,478 @@
+package cortex
+
+import (
+	"bytes"
+	"context"
+	"flag"
+	"fmt"
+	"net/http"
+	"os"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	prom_storage "github.com/prometheus/prometheus/storage"
+	"github.com/weaveworks/common/middleware"
+	"github.com/weaveworks/common/server"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health/grpc_health_v1"
+	"gopkg.in/yaml.v2"
+
+	"github.com/cortexproject/cortex/pkg/alertmanager"
+	"github.com/cortexproject/cortex/pkg/api"
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/chunk/cache"
+	"github.com/cortexproject/cortex/pkg/chunk/encoding"
+	"github.com/cortexproject/cortex/pkg/chunk/purger"
+	"github.com/cortexproject/cortex/pkg/chunk/storage"
+	chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
+	"github.com/cortexproject/cortex/pkg/compactor"
+	"github.com/cortexproject/cortex/pkg/configs"
+	configAPI "github.com/cortexproject/cortex/pkg/configs/api"
+	"github.com/cortexproject/cortex/pkg/configs/db"
+	"github.com/cortexproject/cortex/pkg/distributor"
+	"github.com/cortexproject/cortex/pkg/flusher"
+	"github.com/cortexproject/cortex/pkg/ingester"
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/querier"
+	"github.com/cortexproject/cortex/pkg/querier/frontend"
+	"github.com/cortexproject/cortex/pkg/querier/queryrange"
+	"github.com/cortexproject/cortex/pkg/ring"
+	"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
"github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storegateway" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/grpc/healthcheck" + "github.com/cortexproject/cortex/pkg/util/runtimeconfig" + "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/validation" +) + +// The design pattern for Cortex is a series of config objects, which are +// registered for command line flags, and then a series of components that +// are instantiated and composed. Some rules of thumb: +// - Config types should only contain 'simple' types (ints, strings, urls etc). +// - Flag validation should be done by the flag; use a flag.Value where +// appropriate. +// - Config types should map 1:1 with a component type. +// - Config types should define flags with a common prefix. +// - It's fine to nest configs within configs, but this should match the +// nesting of components within components. +// - Limit as much is possible sharing of configuration between config types. +// Where necessary, use a pointer for this - avoid repetition. +// - Where a nesting of components its not obvious, it's fine to pass +// references to other components constructors to compose them. +// - First argument for a components constructor should be its matching config +// object. + +// Config is the root config for Cortex. +type Config struct { + Target ModuleName `yaml:"target"` + AuthEnabled bool `yaml:"auth_enabled"` + PrintConfig bool `yaml:"-"` + HTTPPrefix string `yaml:"http_prefix"` + + API api.Config `yaml:"api"` + Server server.Config `yaml:"server"` + Distributor distributor.Config `yaml:"distributor"` + Querier querier.Config `yaml:"querier"` + IngesterClient client.Config `yaml:"ingester_client"` + Ingester ingester.Config `yaml:"ingester"` + Flusher flusher.Config `yaml:"flusher"` + Storage storage.Config `yaml:"storage"` + ChunkStore chunk.StoreConfig `yaml:"chunk_store"` + Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) + LimitsConfig validation.Limits `yaml:"limits"` + Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` + Worker frontend.WorkerConfig `yaml:"frontend_worker"` + Frontend frontend.Config `yaml:"frontend"` + QueryRange queryrange.Config `yaml:"query_range"` + TableManager chunk.TableManagerConfig `yaml:"table_manager"` + Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. + TSDB tsdb.Config `yaml:"tsdb"` + Compactor compactor.Config `yaml:"compactor"` + StoreGateway storegateway.Config `yaml:"store_gateway" doc:"hidden"` // this component is not yet finished. + DataPurgerConfig purger.Config `yaml:"purger"` + + Ruler ruler.Config `yaml:"ruler"` + Configs configs.Config `yaml:"configs"` + Alertmanager alertmanager.MultitenantAlertmanagerConfig `yaml:"alertmanager"` + RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config"` + MemberlistKV memberlist.KVConfig `yaml:"memberlist"` +} + +// RegisterFlags registers flag. +func (c *Config) RegisterFlags(f *flag.FlagSet) { + c.Server.MetricsNamespace = "cortex" + c.Target = All + c.Server.ExcludeRequestInLog = true + f.Var(&c.Target, "target", "The Cortex service to run. 
+	f.BoolVar(&c.AuthEnabled, "auth.enabled", true, "Set to false to disable auth.")
+	f.BoolVar(&c.PrintConfig, "print.config", false, "Print the config and exit.")
+	f.StringVar(&c.HTTPPrefix, "http.prefix", "/api/prom", "HTTP path prefix for Cortex API.")
+
+	c.API.RegisterFlags(f)
+	c.Server.RegisterFlags(f)
+	c.Distributor.RegisterFlags(f)
+	c.Querier.RegisterFlags(f)
+	c.IngesterClient.RegisterFlags(f)
+	c.Ingester.RegisterFlags(f)
+	c.Flusher.RegisterFlags(f)
+	c.Storage.RegisterFlags(f)
+	c.ChunkStore.RegisterFlags(f)
+	c.Schema.RegisterFlags(f)
+	c.LimitsConfig.RegisterFlags(f)
+	c.Prealloc.RegisterFlags(f)
+	c.Worker.RegisterFlags(f)
+	c.Frontend.RegisterFlags(f)
+	c.QueryRange.RegisterFlags(f)
+	c.TableManager.RegisterFlags(f)
+	c.Encoding.RegisterFlags(f)
+	c.TSDB.RegisterFlags(f)
+	c.Compactor.RegisterFlags(f)
+	c.StoreGateway.RegisterFlags(f)
+	c.DataPurgerConfig.RegisterFlags(f)
+
+	c.Ruler.RegisterFlags(f)
+	c.Configs.RegisterFlags(f)
+	c.Alertmanager.RegisterFlags(f)
+	c.RuntimeConfig.RegisterFlags(f)
+	c.MemberlistKV.RegisterFlags(f, "")
+
+	// These don't seem to have a home.
+	flag.IntVar(&chunk_util.QueryParallelism, "querier.query-parallelism", 100, "Max subqueries run in parallel per higher-level query.")
+}
+
+// Validate validates the Cortex config and returns an error if the
+// validation doesn't pass.
+func (c *Config) Validate(log log.Logger) error {
+	if err := c.Schema.Validate(); err != nil {
+		return errors.Wrap(err, "invalid schema config")
+	}
+	if err := c.Encoding.Validate(); err != nil {
+		return errors.Wrap(err, "invalid encoding config")
+	}
+	if err := c.Storage.Validate(); err != nil {
+		return errors.Wrap(err, "invalid storage config")
+	}
+	if err := c.TSDB.Validate(); err != nil {
+		return errors.Wrap(err, "invalid TSDB config")
+	}
+	if err := c.LimitsConfig.Validate(c.Distributor.ShardByAllLabels); err != nil {
+		return errors.Wrap(err, "invalid limits config")
+	}
+	if err := c.Distributor.Validate(); err != nil {
+		return errors.Wrap(err, "invalid distributor config")
+	}
+	if err := c.Querier.Validate(); err != nil {
+		return errors.Wrap(err, "invalid querier config")
+	}
+	if err := c.QueryRange.Validate(log); err != nil {
+		return errors.Wrap(err, "invalid queryrange config")
+	}
+	if err := c.TableManager.Validate(); err != nil {
+		return errors.Wrap(err, "invalid tablemanager config")
+	}
+	return nil
+}
+
+// Cortex is the root datastructure for Cortex.
+type Cortex struct {
+	target ModuleName
+
+	// set during initialization
+	serviceMap map[ModuleName]services.Service
+
+	api           *api.API
+	server        *server.Server
+	ring          *ring.Ring
+	overrides     *validation.Overrides
+	distributor   *distributor.Distributor
+	ingester      *ingester.Ingester
+	flusher       *flusher.Flusher
+	store         chunk.Store
+	deletesStore  *purger.DeleteStore
+	frontend      *frontend.Frontend
+	tableManager  *chunk.TableManager
+	cache         cache.Cache
+	runtimeConfig *runtimeconfig.Manager
+	dataPurger    *purger.DataPurger
+
+	ruler        *ruler.Ruler
+	configAPI    *configAPI.API
+	configDB     db.DB
+	alertmanager *alertmanager.MultitenantAlertmanager
+	compactor    *compactor.Compactor
+	storeGateway *storegateway.StoreGateway
+	memberlistKV *memberlist.KVInit
+
+	// Queryable that the querier should use to query the long
+	// term storage. It depends on the storage engine used.
+	storeQueryable prom_storage.Queryable
+}
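+
+// A typical embedding sequence, sketched from the functions above (error
+// handling elided; flag wiring is up to the caller):
+//
+//	var cfg Config
+//	cfg.RegisterFlags(flag.CommandLine)
+//	flag.Parse()
+//	_ = cfg.Validate(util.Logger)
+//	c, _ := New(cfg) // builds the service map for cfg.Target
+//	_ = c.Run()      // blocks until the server module stops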
+
+// New makes a new Cortex.
+func New(cfg Config) (*Cortex, error) {
+	if cfg.PrintConfig {
+		if err := yaml.NewEncoder(os.Stdout).Encode(&cfg); err != nil {
+			fmt.Println("Error encoding config:", err)
+		}
+		os.Exit(0)
+	}
+
+	cortex := &Cortex{
+		target: cfg.Target,
+	}
+
+	cortex.setupAuthMiddleware(&cfg)
+
+	serviceMap, err := cortex.initModuleServices(&cfg, cfg.Target)
+	if err != nil {
+		return nil, err
+	}
+
+	cortex.serviceMap = serviceMap
+	cortex.api.RegisterServiceMapHandler(http.HandlerFunc(cortex.servicesHandler))
+
+	return cortex, nil
+}
+
+func (t *Cortex) setupAuthMiddleware(cfg *Config) {
+	if cfg.AuthEnabled {
+		cfg.Server.GRPCMiddleware = []grpc.UnaryServerInterceptor{
+			middleware.ServerUserHeaderInterceptor,
+		}
+		cfg.Server.GRPCStreamMiddleware = []grpc.StreamServerInterceptor{
+			func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+				switch info.FullMethod {
+				// Don't check the auth header on TransferChunks, as we weren't originally
+				// sending it and this could cause transfers to fail on update.
+				//
+				// Also don't check the auth header on /frontend.Frontend/Process, as this
+				// handles queries for multiple users.
+				case "/cortex.Ingester/TransferChunks", "/frontend.Frontend/Process":
+					return handler(srv, ss)
+				default:
+					return middleware.StreamServerUserHeaderInterceptor(srv, ss, info, handler)
+				}
+			},
+		}
+	} else {
+		cfg.Server.GRPCMiddleware = []grpc.UnaryServerInterceptor{
+			fakeGRPCAuthUnaryMiddleware,
+		}
+		cfg.Server.GRPCStreamMiddleware = []grpc.StreamServerInterceptor{
+			fakeGRPCAuthStreamMiddleware,
+		}
+		cfg.API.HTTPAuthMiddleware = fakeHTTPAuthMiddleware
+	}
+}
+
+func (t *Cortex) initModuleServices(cfg *Config, target ModuleName) (map[ModuleName]services.Service, error) {
+	servicesMap := map[ModuleName]services.Service{}
+
+	// initialize all of our dependencies first
+	deps := orderedDeps(target)
+	deps = append(deps, target) // lastly, initialize the requested module
+
+	for ix, n := range deps {
+		mod := modules[n]
+
+		var serv services.Service
+
+		if mod.service != nil {
+			s, err := mod.service(t, cfg)
+			if err != nil {
+				return nil, errors.Wrap(err, fmt.Sprintf("error initialising module: %s", n))
+			}
+			serv = s
+		} else if mod.wrappedService != nil {
+			s, err := mod.wrappedService(t, cfg)
+			if err != nil {
+				return nil, errors.Wrap(err, fmt.Sprintf("error initialising module: %s", n))
+			}
+			if s != nil {
+				// We pass servicesMap, which isn't yet finished. By the time service starts,
+				// it will be fully built, so there is no need for extra synchronization.
+				serv = newModuleServiceWrapper(servicesMap, n, s, mod.deps, findInverseDependencies(n, deps[ix+1:]))
+			}
+		}
+
+		if serv != nil {
+			servicesMap[n] = serv
+		}
+	}
+
+	return servicesMap, nil
+}
+
+// Run starts Cortex running, and blocks until it stops.
+func (t *Cortex) Run() error {
+	// get all services, create service manager and tell it to start
+	servs := []services.Service(nil)
+	for _, s := range t.serviceMap {
+		servs = append(servs, s)
+	}
+
+	sm, err := services.NewManager(servs...)
+	if err != nil {
+		return err
+	}
+
+	// before starting servers, register /ready handler and gRPC health check service.
+	// It should reflect entire Cortex.
+	t.server.HTTP.Path("/ready").Handler(t.readyHandler(sm))
+	grpc_health_v1.RegisterHealthServer(t.server.GRPC, healthcheck.New(sm))
+
+	// Let's listen for events from this manager, and log them.
+	healthy := func() { level.Info(util.Logger).Log("msg", "Cortex started") }
+	stopped := func() { level.Info(util.Logger).Log("msg", "Cortex stopped") }
+	serviceFailed := func(service services.Service) {
+		// if any service fails, stop the entire Cortex
+		sm.StopAsync()
+
+		// let's find out which module failed
+		for m, s := range t.serviceMap {
+			if s == service {
+				if service.FailureCase() == util.ErrStopProcess {
+					level.Info(util.Logger).Log("msg", "received stop signal via return error", "module", m, "error", service.FailureCase())
+				} else {
+					level.Error(util.Logger).Log("msg", "module failed", "module", m, "error", service.FailureCase())
+				}
+				return
+			}
+		}
+
+		level.Error(util.Logger).Log("msg", "module failed", "module", "unknown", "error", service.FailureCase())
+	}
+
+	sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed))
+
+	// Currently it's the Server that reacts to the signal handler,
+	// so get the Server service, and wait until it gets to the Stopping state.
+	// It will also be stopped via the service manager if any service fails (see the attached service listener).
+	// Attach the listener before starting services, or we may miss the notification.
+	serverStopping := make(chan struct{})
+	t.serviceMap[Server].AddListener(services.NewListener(nil, nil, func(from services.State) {
+		close(serverStopping)
+	}, nil, nil))
+
+	// Start all services. This can really only fail if some service is already
+	// in a state other than New, which should not be the case.
+	err = sm.StartAsync(context.Background())
+	if err == nil {
+		// no error starting the services, now let's just wait until the Server module
+		// transitions to Stopping (after SIGTERM or when some service fails),
+		// and then initiate shutdown
+		<-serverStopping
+	}
+
+	// Stop all the services, and wait until they are all done.
+	// We don't care about this error, as it cannot really fail.
+	_ = services.StopManagerAndAwaitStopped(context.Background(), sm)
+
+	// if any service failed, report that as an error to the caller
+	if err == nil {
+		if failed := sm.ServicesByState()[services.Failed]; len(failed) > 0 {
+			for _, f := range failed {
+				if f.FailureCase() != util.ErrStopProcess {
+					// Details were reported via the failure listener before
+					err = errors.New("failed services")
+					break
+				}
+			}
+		}
+	}
+	return err
+}
+
+func (t *Cortex) readyHandler(sm *services.Manager) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if !sm.IsHealthy() {
+			msg := bytes.Buffer{}
+			msg.WriteString("Some services are not Running:\n")
+
+			byState := sm.ServicesByState()
+			for st, ls := range byState {
+				msg.WriteString(fmt.Sprintf("%v: %d\n", st, len(ls)))
+			}
+
+			http.Error(w, msg.String(), http.StatusServiceUnavailable)
+			return
+		}
+
+		// Ingester has a special check that makes sure that it was able to register into the ring,
+		// and that all other ring entries are OK too.
+		if t.ingester != nil {
+			if err := t.ingester.CheckReady(r.Context()); err != nil {
+				http.Error(w, "Ingester not ready: "+err.Error(), http.StatusServiceUnavailable)
+				return
+			}
+		}
+
+		http.Error(w, "ready", http.StatusOK)
+	}
+}
+
+// listDeps recursively gets a list of dependencies for a passed moduleName
+func listDeps(m ModuleName) []ModuleName {
+	deps := modules[m].deps
+	for _, d := range modules[m].deps {
+		deps = append(deps, listDeps(d)...)
+	}
+	return deps
+}
+
+// orderedDeps gets a list of all dependencies ordered so that items are always after any of their dependencies.
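+// For example, with a hypothetical module A that depends on B and C, where B
+// also depends on C, orderedDeps(A) yields [C, B]: C has no dependencies, B
+// must come after C, and the target A itself is appended by the caller
+// (see initModuleServices above).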
+func orderedDeps(m ModuleName) []ModuleName {
+	deps := listDeps(m)
+
+	// get a unique list of moduleNames, with a flag for whether they have been added to our result
+	uniq := map[ModuleName]bool{}
+	for _, dep := range deps {
+		uniq[dep] = false
+	}
+
+	result := make([]ModuleName, 0, len(uniq))
+
+	// keep looping through all modules until they have all been added to the result.
+	for len(result) < len(uniq) {
+	OUTER:
+		for name, added := range uniq {
+			if added {
+				continue
+			}
+			for _, dep := range modules[name].deps {
+				// stop processing this module if one of its dependencies has
+				// not been added to the result yet.
+				if !uniq[dep] {
+					continue OUTER
+				}
+			}
+
+			// if all of the module's dependencies have been added to the result slice,
+			// then we can safely add this module to the result slice as well.
+			uniq[name] = true
+			result = append(result, name)
+		}
+	}
+	return result
+}
+
+// find modules in the supplied list that depend on mod
+func findInverseDependencies(mod ModuleName, mods []ModuleName) []ModuleName {
+	result := []ModuleName(nil)
+
+	for _, n := range mods {
+		for _, d := range modules[n].deps {
+			if d == mod {
+				result = append(result, n)
+				break
+			}
+		}
+	}
+
+	return result
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/fake_auth.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/fake_auth.go
new file mode 100644
index 000000000000..5e3d4c15d356
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/fake_auth.go
@@ -0,0 +1,42 @@
+package cortex
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/weaveworks/common/middleware"
+	"github.com/weaveworks/common/user"
+	"google.golang.org/grpc"
+)
+
+// Fake auth middlewares just inject a fake userID, so the rest of the code
+// can continue to be multitenant.
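+//
+// Downstream code is unaffected by this: a handler can still call, for
+// example, user.ExtractOrgID(ctx), and will simply observe the org ID "fake".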
+
+var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := user.InjectOrgID(r.Context(), "fake")
+		next.ServeHTTP(w, r.WithContext(ctx))
+	})
+})
+
+var fakeGRPCAuthUnaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	ctx = user.InjectOrgID(ctx, "fake")
+	return handler(ctx, req)
+}
+
+var fakeGRPCAuthStreamMiddleware = func(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	ctx := user.InjectOrgID(ss.Context(), "fake")
+	return handler(srv, serverStream{
+		ctx:          ctx,
+		ServerStream: ss,
+	})
+}
+
+type serverStream struct {
+	ctx context.Context
+	grpc.ServerStream
+}
+
+func (ss serverStream) Context() context.Context {
+	return ss.ctx
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/index.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/index.go
new file mode 100644
index 000000000000..e1fb282391e9
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/index.go
@@ -0,0 +1 @@
+package cortex
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/module_service_wrapper.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/module_service_wrapper.go
new file mode 100644
index 000000000000..2ac4cc87e533
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/module_service_wrapper.go
@@ -0,0 +1,30 @@
+package cortex
+
+import (
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+// This function wraps a module service, and adds waiting for dependencies to start before starting,
+// and for dependent modules to stop before stopping this module service.
+func newModuleServiceWrapper(serviceMap map[ModuleName]services.Service, mod ModuleName, modServ services.Service, startDeps []ModuleName, stopDeps []ModuleName) services.Service { + getDeps := func(deps []ModuleName) map[string]services.Service { + r := map[string]services.Service{} + for _, m := range deps { + s := serviceMap[m] + if s != nil { + r[string(m)] = s + } + } + return r + } + + return util.NewModuleService(string(mod), modServ, + func(_ string) map[string]services.Service { + return getDeps(startDeps) + }, + func(_ string) map[string]services.Service { + return getDeps(stopDeps) + }, + ) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go new file mode 100644 index 000000000000..adaeb89b3487 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -0,0 +1,629 @@ +package cortex + +import ( + "fmt" + "os" + "strings" + + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/promql" + httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" + "github.com/weaveworks/common/server" + + "github.com/cortexproject/cortex/pkg/alertmanager" + "github.com/cortexproject/cortex/pkg/api" + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/chunk/purger" + "github.com/cortexproject/cortex/pkg/chunk/storage" + "github.com/cortexproject/cortex/pkg/compactor" + configAPI "github.com/cortexproject/cortex/pkg/configs/api" + "github.com/cortexproject/cortex/pkg/configs/db" + "github.com/cortexproject/cortex/pkg/distributor" + "github.com/cortexproject/cortex/pkg/flusher" + "github.com/cortexproject/cortex/pkg/ingester" + "github.com/cortexproject/cortex/pkg/querier" + "github.com/cortexproject/cortex/pkg/querier/frontend" + "github.com/cortexproject/cortex/pkg/querier/queryrange" + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv/codec" + "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" + "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/storegateway" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/runtimeconfig" + "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/validation" +) + +// ModuleName is used to describe a running module +type ModuleName string + +// The various modules that make up Cortex. 
+const ( + API ModuleName = "api" + Ring ModuleName = "ring" + RuntimeConfig ModuleName = "runtime-config" + Overrides ModuleName = "overrides" + Server ModuleName = "server" + Distributor ModuleName = "distributor" + Ingester ModuleName = "ingester" + Flusher ModuleName = "flusher" + Querier ModuleName = "querier" + StoreQueryable ModuleName = "store-queryable" + QueryFrontend ModuleName = "query-frontend" + Store ModuleName = "store" + DeleteRequestsStore ModuleName = "delete-requests-store" + TableManager ModuleName = "table-manager" + Ruler ModuleName = "ruler" + Configs ModuleName = "configs" + AlertManager ModuleName = "alertmanager" + Compactor ModuleName = "compactor" + StoreGateway ModuleName = "store-gateway" + MemberlistKV ModuleName = "memberlist-kv" + DataPurger ModuleName = "data-purger" + All ModuleName = "all" +) + +func (m ModuleName) String() string { + return string(m) +} + +func (m *ModuleName) Set(s string) error { + l := ModuleName(strings.ToLower(s)) + if _, ok := modules[l]; !ok { + return fmt.Errorf("unrecognised module name: %s", s) + } + *m = l + return nil +} + +func (m ModuleName) MarshalYAML() (interface{}, error) { + return m.String(), nil +} + +func (m *ModuleName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + return m.Set(s) +} + +func (t *Cortex) initAPI(cfg *Config) (services.Service, error) { + cfg.API.ServerPrefix = cfg.Server.PathPrefix + cfg.API.LegacyHTTPPrefix = cfg.HTTPPrefix + + a, err := api.New(cfg.API, t.server, util.Logger) + if err != nil { + return nil, err + } + + t.api = a + + t.api.RegisterAPI(cfg) + + return nil, nil +} + +func (t *Cortex) initServer(cfg *Config) (services.Service, error) { + serv, err := server.New(cfg.Server) + if err != nil { + return nil, err + } + + t.server = serv + + servicesToWaitFor := func() []services.Service { + svs := []services.Service(nil) + for m, s := range t.serviceMap { + // Server should not wait for itself. 
+			if m != Server {
+				svs = append(svs, s)
+			}
+		}
+		return svs
+	}
+
+	s := NewServerService(t.server, servicesToWaitFor)
+
+	return s, nil
+}
+
+func (t *Cortex) initRing(cfg *Config) (serv services.Service, err error) {
+	cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
+	cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
+	t.ring, err = ring.New(cfg.Ingester.LifecyclerConfig.RingConfig, "ingester", ring.IngesterRingKey)
+	if err != nil {
+		return nil, err
+	}
+	prometheus.MustRegister(t.ring)
+
+	t.api.RegisterRing(t.ring)
+
+	return t.ring, nil
+}
+
+func (t *Cortex) initRuntimeConfig(cfg *Config) (services.Service, error) {
+	if cfg.RuntimeConfig.LoadPath == "" {
+		cfg.RuntimeConfig.LoadPath = cfg.LimitsConfig.PerTenantOverrideConfig
+		cfg.RuntimeConfig.ReloadPeriod = cfg.LimitsConfig.PerTenantOverridePeriod
+	}
+	cfg.RuntimeConfig.Loader = loadRuntimeConfig
+
+	// make sure to set default limits before we start loading configuration into memory
+	validation.SetDefaultLimitsForYAMLUnmarshalling(cfg.LimitsConfig)
+
+	serv, err := runtimeconfig.NewRuntimeConfigManager(cfg.RuntimeConfig, prometheus.DefaultRegisterer)
+	t.runtimeConfig = serv
+	return serv, err
+}
+
+func (t *Cortex) initOverrides(cfg *Config) (serv services.Service, err error) {
+	t.overrides, err = validation.NewOverrides(cfg.LimitsConfig, tenantLimitsFromRuntimeConfig(t.runtimeConfig))
+	// overrides don't have operational state, nor do they need to do anything more in the starting/stopping phase,
+	// so there is no need to return any service.
+	return nil, err
+}
+
+func (t *Cortex) initDistributor(cfg *Config) (serv services.Service, err error) {
+	cfg.Distributor.DistributorRing.ListenPort = cfg.Server.GRPCListenPort
+	cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
+
+	// Check whether the distributor can join the distributors ring, which is
+	// whenever it's not running as an internal dependency (i.e. as the
+	// querier's or ruler's dependency)
+	canJoinDistributorsRing := (cfg.Target == All || cfg.Target == Distributor)
+
+	t.distributor, err = distributor.New(cfg.Distributor, cfg.IngesterClient, t.overrides, t.ring, canJoinDistributorsRing)
+	if err != nil {
+		return
+	}
+
+	t.api.RegisterDistributor(t.distributor, cfg.Distributor)
+
+	return t.distributor, nil
+}
+
+func (t *Cortex) initQuerier(cfg *Config) (serv services.Service, err error) {
+	var tombstonesLoader *purger.TombstonesLoader
+	if cfg.DataPurgerConfig.Enable {
+		tombstonesLoader = purger.NewTombstonesLoader(t.deletesStore)
+	} else {
+		// Until we need to explicitly enable delete series support, we create a
+		// TombstonesLoader without a DeleteStore, which acts as a no-op.
+		tombstonesLoader = purger.NewTombstonesLoader(nil)
+	}
+
+	queryable, engine := querier.New(cfg.Querier, t.distributor, t.storeQueryable, tombstonesLoader, prometheus.DefaultRegisterer)
+
+	// if we are not configured for single binary mode then the querier needs to register its paths externally
+	registerExternally := cfg.Target != All
+	handler := t.api.RegisterQuerier(queryable, engine, t.distributor, registerExternally)
+
+	// Single binary mode requires a properly configured worker. If the operator
+	// has not configured the worker, we attempt an automatic configuration here.
+	if cfg.Worker.Address == "" && cfg.Target == All {
+		address := fmt.Sprintf("127.0.0.1:%d", cfg.Server.GRPCListenPort)
+		level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address)
+		cfg.Worker.Address = address
+	}
+
+	// The query frontend worker will only be started after all its dependencies are started, not here.
+	// Worker may also be nil, if not configured, which is OK.
+	worker, err := frontend.NewWorker(cfg.Worker, httpgrpc_server.NewServer(handler), util.Logger)
+	if err != nil {
+		return
+	}
+
+	return worker, nil
+}
+
+func (t *Cortex) initStoreQueryable(cfg *Config) (services.Service, error) {
+	if cfg.Storage.Engine == storage.StorageEngineChunks {
+		t.storeQueryable = querier.NewChunkStoreQueryable(cfg.Querier, t.store)
+		return nil, nil
+	}
+
+	if cfg.Storage.Engine == storage.StorageEngineTSDB {
+		storeQueryable, err := querier.NewBlockQueryable(cfg.TSDB, cfg.Server.LogLevel, prometheus.DefaultRegisterer)
+		if err != nil {
+			return nil, err
+		}
+		t.storeQueryable = storeQueryable
+		return storeQueryable, nil
+	}
+
+	return nil, fmt.Errorf("unknown storage engine '%s'", cfg.Storage.Engine)
+}
+
+func (t *Cortex) initIngester(cfg *Config) (serv services.Service, err error) {
+	cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.runtimeConfig)
+	cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
+	cfg.Ingester.LifecyclerConfig.ListenPort = &cfg.Server.GRPCListenPort
+	cfg.Ingester.TSDBEnabled = cfg.Storage.Engine == storage.StorageEngineTSDB
+	cfg.Ingester.TSDBConfig = cfg.TSDB
+	cfg.Ingester.ShardByAllLabels = cfg.Distributor.ShardByAllLabels
+
+	t.ingester, err = ingester.New(cfg.Ingester, cfg.IngesterClient, t.overrides, t.store, prometheus.DefaultRegisterer)
+	if err != nil {
+		return
+	}
+
+	t.api.RegisterIngester(t.ingester, cfg.Distributor)
+
+	return t.ingester, nil
+}
+
+func (t *Cortex) initFlusher(cfg *Config) (serv services.Service, err error) {
+	t.flusher, err = flusher.New(
+		cfg.Flusher,
+		cfg.Ingester,
+		cfg.IngesterClient,
+		t.store,
+		prometheus.DefaultRegisterer,
+	)
+	if err != nil {
+		return
+	}
+
+	return t.flusher, nil
+}
+
+func (t *Cortex) initStore(cfg *Config) (serv services.Service, err error) {
+	if cfg.Storage.Engine == storage.StorageEngineTSDB {
+		return nil, nil
+	}
+	err = cfg.Schema.Load()
+	if err != nil {
+		return
+	}
+
+	t.store, err = storage.NewStore(cfg.Storage, cfg.ChunkStore, cfg.Schema, t.overrides)
+	if err != nil {
+		return
+	}
+
+	return services.NewIdleService(nil, func(_ error) error {
+		t.store.Stop()
+		return nil
+	}), nil
+}
+
+func (t *Cortex) initDeleteRequestsStore(cfg *Config) (serv services.Service, err error) {
+	if !cfg.DataPurgerConfig.Enable {
+		return
+	}
+
+	var indexClient chunk.IndexClient
+	indexClient, err = storage.NewIndexClient(cfg.Storage.DeleteStoreConfig.Store, cfg.Storage, cfg.Schema)
+	if err != nil {
+		return
+	}
+
+	t.deletesStore, err = purger.NewDeleteStore(cfg.Storage.DeleteStoreConfig, indexClient)
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+func (t *Cortex) initQueryFrontend(cfg *Config) (serv services.Service, err error) {
+	// Load the schema only if sharded queries are enabled.
+	if cfg.QueryRange.ShardedQueries {
+		err = cfg.Schema.Load()
+		if err != nil {
+			return
+		}
+	}
+
+	t.frontend, err = frontend.New(cfg.Frontend, util.Logger, prometheus.DefaultRegisterer)
+	if err != nil {
+		return
+	}
+	tripperware, cache, err := queryrange.NewTripperware(
+		cfg.QueryRange,
+		util.Logger,
+		t.overrides,
+		queryrange.PrometheusCodec,
+		queryrange.PrometheusResponseExtractor,
+		cfg.Schema,
+		promql.EngineOpts{
+			Logger:     util.Logger,
+			Reg:        prometheus.DefaultRegisterer,
+			MaxSamples: cfg.Querier.MaxSamples,
+			Timeout:    cfg.Querier.Timeout,
+		},
+		cfg.Querier.QueryIngestersWithin,
+		prometheus.DefaultRegisterer,
+	)
+
+	if err != nil {
+		return nil, err
+	}
+	t.cache = cache
+	t.frontend.Wrap(tripperware)
+
+	t.api.RegisterQueryFrontend(t.frontend)
+
+	return services.NewIdleService(nil, func(_ error) error {
+		t.frontend.Close()
+		if t.cache != nil {
+			t.cache.Stop()
+			t.cache = nil
+		}
+		return nil
+	}), nil
+}
+
+func (t *Cortex) initTableManager(cfg *Config) (services.Service, error) {
+	if cfg.Storage.Engine == storage.StorageEngineTSDB {
+		return nil, nil // table manager isn't used in v2
+	}
+
+	err := cfg.Schema.Load()
+	if err != nil {
+		return nil, err
+	}
+
+	// Assume the newest config is the one to use
+	lastConfig := &cfg.Schema.Configs[len(cfg.Schema.Configs)-1]
+
+	if (cfg.TableManager.ChunkTables.WriteScale.Enabled ||
+		cfg.TableManager.IndexTables.WriteScale.Enabled ||
+		cfg.TableManager.ChunkTables.InactiveWriteScale.Enabled ||
+		cfg.TableManager.IndexTables.InactiveWriteScale.Enabled ||
+		cfg.TableManager.ChunkTables.ReadScale.Enabled ||
+		cfg.TableManager.IndexTables.ReadScale.Enabled ||
+		cfg.TableManager.ChunkTables.InactiveReadScale.Enabled ||
+		cfg.TableManager.IndexTables.InactiveReadScale.Enabled) &&
+		cfg.Storage.AWSStorageConfig.Metrics.URL == "" {
+		level.Error(util.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided")
+		os.Exit(1)
+	}
+
+	tableClient, err := storage.NewTableClient(lastConfig.IndexType, cfg.Storage)
+	if err != nil {
+		return nil, err
+	}
+
+	bucketClient, err := storage.NewBucketClient(cfg.Storage)
+	util.CheckFatal("initializing bucket client", err)
+
+	t.tableManager, err = chunk.NewTableManager(cfg.TableManager, cfg.Schema, cfg.Ingester.MaxChunkAge, tableClient, bucketClient, prometheus.DefaultRegisterer)
+	return t.tableManager, err
+}
+
+func (t *Cortex) initRuler(cfg *Config) (serv services.Service, err error) {
+	var tombstonesLoader *purger.TombstonesLoader
+	if cfg.DataPurgerConfig.Enable {
+		tombstonesLoader = purger.NewTombstonesLoader(t.deletesStore)
+	} else {
+		// Until we need to explicitly enable delete series support, we create a
+		// TombstonesLoader without a DeleteStore, which acts as a no-op.
+		tombstonesLoader = purger.NewTombstonesLoader(nil)
+	}
+
+	cfg.Ruler.Ring.ListenPort = cfg.Server.GRPCListenPort
+	cfg.Ruler.Ring.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
+	queryable, engine := querier.New(cfg.Querier, t.distributor, t.storeQueryable, tombstonesLoader, prometheus.DefaultRegisterer)
+
+	t.ruler, err = ruler.NewRuler(cfg.Ruler, engine, queryable, t.distributor, prometheus.DefaultRegisterer, util.Logger)
+	if err != nil {
+		return
+	}
+
+	// Expose HTTP endpoints.
+ t.api.RegisterRuler(t.ruler, cfg.Ruler.EnableAPI) + + return t.ruler, nil +} + +func (t *Cortex) initConfig(cfg *Config) (serv services.Service, err error) { + t.configDB, err = db.New(cfg.Configs.DB) + if err != nil { + return + } + + t.configAPI = configAPI.New(t.configDB, cfg.Configs.API) + t.configAPI.RegisterRoutes(t.server.HTTP) + return services.NewIdleService(nil, func(_ error) error { + t.configDB.Close() + return nil + }), nil +} + +func (t *Cortex) initAlertManager(cfg *Config) (serv services.Service, err error) { + t.alertmanager, err = alertmanager.NewMultitenantAlertmanager(&cfg.Alertmanager, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return + } + t.api.RegisterAlertmanager(t.alertmanager, cfg.Target == AlertManager) + return t.alertmanager, nil +} + +func (t *Cortex) initCompactor(cfg *Config) (serv services.Service, err error) { + cfg.Compactor.ShardingRing.ListenPort = cfg.Server.GRPCListenPort + cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV + + t.compactor, err = compactor.NewCompactor(cfg.Compactor, cfg.TSDB, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return + } + + // Expose HTTP endpoints. + t.api.RegisterCompactor(t.compactor) + return t.compactor, nil +} + +func (t *Cortex) initStoreGateway(cfg *Config) (serv services.Service, err error) { + if cfg.Storage.Engine != storage.StorageEngineTSDB { + return nil, nil + } + + cfg.StoreGateway.ShardingRing.ListenPort = cfg.Server.GRPCListenPort + cfg.StoreGateway.ShardingRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV + + t.storeGateway, err = storegateway.NewStoreGateway(cfg.StoreGateway, cfg.TSDB, cfg.Server.LogLevel, util.Logger, prometheus.DefaultRegisterer) + if err != nil { + return nil, err + } + + // Expose HTTP endpoints. + t.api.RegisterStoreGateway(t.storeGateway) + + return t.storeGateway, nil +} + +func (t *Cortex) initMemberlistKV(cfg *Config) (services.Service, error) { + cfg.MemberlistKV.MetricsRegisterer = prometheus.DefaultRegisterer + cfg.MemberlistKV.Codecs = []codec.Codec{ + ring.GetCodec(), + } + t.memberlistKV = memberlist.NewKVInit(&cfg.MemberlistKV) + + return services.NewIdleService(nil, func(_ error) error { + t.memberlistKV.Stop() + return nil + }), nil +} + +func (t *Cortex) initDataPurger(cfg *Config) (services.Service, error) { + if !cfg.DataPurgerConfig.Enable { + return nil, nil + } + + storageClient, err := storage.NewObjectClient(cfg.DataPurgerConfig.ObjectStoreType, cfg.Storage) + if err != nil { + return nil, err + } + + t.dataPurger, err = purger.NewDataPurger(cfg.DataPurgerConfig, t.deletesStore, t.store, storageClient) + if err != nil { + return nil, err + } + + t.api.RegisterPurger(t.deletesStore) + + return t.dataPurger, nil +} + +type module struct { + deps []ModuleName + + // service for this module (can return nil) + service func(t *Cortex, cfg *Config) (services.Service, error) + + // service that will be wrapped into moduleServiceWrapper, to wait for dependencies to start / end + // (can return nil) + wrappedService func(t *Cortex, cfg *Config) (services.Service, error) +} + +var modules = map[ModuleName]module{ + Server: { + // we cannot use 'wrappedService', as stopped Server service is currently a signal to Cortex + // that it should shutdown. If we used wrappedService, it wouldn't stop until + // all services that depend on it stopped first... but there is nothing that would make them stop. 
+ service: (*Cortex).initServer, + }, + + API: { + deps: []ModuleName{Server}, + wrappedService: (*Cortex).initAPI, + }, + + RuntimeConfig: { + wrappedService: (*Cortex).initRuntimeConfig, + }, + + MemberlistKV: { + wrappedService: (*Cortex).initMemberlistKV, + }, + + Ring: { + deps: []ModuleName{API, RuntimeConfig, MemberlistKV}, + wrappedService: (*Cortex).initRing, + }, + + Overrides: { + deps: []ModuleName{RuntimeConfig}, + wrappedService: (*Cortex).initOverrides, + }, + + Distributor: { + deps: []ModuleName{Ring, API, Overrides}, + wrappedService: (*Cortex).initDistributor, + }, + + Store: { + deps: []ModuleName{Overrides}, + wrappedService: (*Cortex).initStore, + }, + + DeleteRequestsStore: { + wrappedService: (*Cortex).initDeleteRequestsStore, + }, + + Ingester: { + deps: []ModuleName{Overrides, Store, API, RuntimeConfig, MemberlistKV}, + wrappedService: (*Cortex).initIngester, + }, + + Flusher: { + deps: []ModuleName{Store, API}, + wrappedService: (*Cortex).initFlusher, + }, + + Querier: { + deps: []ModuleName{Distributor, Store, Ring, API, StoreQueryable}, + wrappedService: (*Cortex).initQuerier, + }, + + StoreQueryable: { + deps: []ModuleName{Store, DeleteRequestsStore}, + wrappedService: (*Cortex).initStoreQueryable, + }, + + QueryFrontend: { + deps: []ModuleName{API, Overrides}, + wrappedService: (*Cortex).initQueryFrontend, + }, + + TableManager: { + deps: []ModuleName{API}, + wrappedService: (*Cortex).initTableManager, + }, + + Ruler: { + deps: []ModuleName{Distributor, Store, StoreQueryable}, + wrappedService: (*Cortex).initRuler, + }, + + Configs: { + deps: []ModuleName{API}, + wrappedService: (*Cortex).initConfig, + }, + + AlertManager: { + deps: []ModuleName{API}, + wrappedService: (*Cortex).initAlertManager, + }, + + Compactor: { + deps: []ModuleName{API}, + wrappedService: (*Cortex).initCompactor, + }, + + StoreGateway: { + deps: []ModuleName{API}, + wrappedService: (*Cortex).initStoreGateway, + }, + + DataPurger: { + deps: []ModuleName{Store, DeleteRequestsStore, API}, + wrappedService: (*Cortex).initDataPurger, + }, + + All: { + deps: []ModuleName{QueryFrontend, Querier, Ingester, Distributor, TableManager, DataPurger, StoreGateway}, + }, +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go new file mode 100644 index 000000000000..8f914839720d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go @@ -0,0 +1,72 @@ +package cortex + +import ( + "os" + + "gopkg.in/yaml.v2" + + "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util/runtimeconfig" + "github.com/cortexproject/cortex/pkg/util/validation" +) + +// runtimeConfigValues are values that can be reloaded from configuration file while Cortex is running. +// Reloading is done by runtime_config.Manager, which also keeps the currently loaded config. +// These values are then pushed to the components that are interested in them. 
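+//
+// A file consumed by this type might look like the following (the tenant name
+// and limit value are illustrative; the top-level keys come from the yaml tags
+// below, and the nested keys from validation.Limits and kv.MultiRuntimeConfig):
+//
+//	overrides:
+//	  tenant-a:
+//	    ingestion_rate: 350000
+//	multi_kv_config:
+//	  primary: consul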
+type runtimeConfigValues struct {
+	TenantLimits map[string]*validation.Limits `yaml:"overrides"`
+
+	Multi kv.MultiRuntimeConfig `yaml:"multi_kv_config"`
+}
+
+func loadRuntimeConfig(filename string) (interface{}, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	// Make sure the file is closed once we are done decoding it.
+	defer func() {
+		_ = f.Close()
+	}()
+
+	var overrides = &runtimeConfigValues{}
+
+	decoder := yaml.NewDecoder(f)
+	decoder.SetStrict(true)
+	if err := decoder.Decode(&overrides); err != nil {
+		return nil, err
+	}
+
+	return overrides, nil
+}
+
+func tenantLimitsFromRuntimeConfig(c *runtimeconfig.Manager) validation.TenantLimits {
+	return func(userID string) *validation.Limits {
+		cfg, ok := c.GetConfig().(*runtimeConfigValues)
+		if !ok || cfg == nil {
+			return nil
+		}
+
+		return cfg.TenantLimits[userID]
+	}
+}
+
+func multiClientRuntimeConfigChannel(manager *runtimeconfig.Manager) func() <-chan kv.MultiRuntimeConfig {
+	// returns a function that can be used in MultiConfig.ConfigProvider
+	return func() <-chan kv.MultiRuntimeConfig {
+		outCh := make(chan kv.MultiRuntimeConfig, 1)
+
+		// push the initial config to the channel
+		val := manager.GetConfig()
+		if cfg, ok := val.(*runtimeConfigValues); ok && cfg != nil {
+			outCh <- cfg.Multi
+		}
+
+		ch := manager.CreateListenerChannel(1)
+		go func() {
+			for val := range ch {
+				if cfg, ok := val.(*runtimeConfigValues); ok && cfg != nil {
+					outCh <- cfg.Multi
+				}
+			}
+		}()
+
+		return outCh
+	}
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go
new file mode 100644
index 000000000000..a16f67ac5c6d
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go
@@ -0,0 +1,53 @@
+package cortex
+
+import (
+	"context"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/weaveworks/common/server"
+
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+// NewServerService constructs a service from the Server component.
+// servicesToWaitFor is called when the server is stopping, and should return all
+// services that need to terminate before the server actually stops.
+// N.B.: this function is NOT Cortex specific, please keep it that way.
+func NewServerService(serv *server.Server, servicesToWaitFor func() []services.Service) services.Service {
+	serverDone := make(chan error, 1)
+
+	runFn := func(ctx context.Context) error {
+		go func() {
+			defer close(serverDone)
+			serverDone <- serv.Run()
+		}()
+
+		select {
+		case <-ctx.Done():
+			return nil
+		case err := <-serverDone:
+			if err != nil {
+				level.Error(util.Logger).Log("msg", "server failed", "err", err)
+			}
+			return err
+		}
+	}
+
+	stoppingFn := func(_ error) error {
+		// wait until all modules are done, and then shut down the server.
+		for _, s := range servicesToWaitFor() {
+			_ = s.AwaitTerminated(context.Background())
+		}
+
+		// shutdown HTTP and gRPC servers (this also unblocks Run)
+		serv.Shutdown()
+
+		// If not closed yet, wait until the server stops.
+		<-serverDone
+		level.Info(util.Logger).Log("msg", "server stopped")
+		return nil
+	}
+
+	return services.NewBasicService(nil, runFn, stoppingFn)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/status.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/status.go
new file mode 100644
index 000000000000..dec9a0f1e9b0
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/status.go
@@ -0,0 +1,18 @@
+package cortex
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func (t *Cortex) servicesHandler(w http.ResponseWriter, r *http.Request) {
+	// Headers must be set before the first call to WriteHeader; they are
+	// ignored afterwards.
+	w.Header().Set("Content-Type", "text/plain")
+	w.WriteHeader(200)
+
+	// TODO: this could be extended to also print sub-services, if a given service has any
+	for mod, s := range t.serviceMap {
+		if s != nil {
+			fmt.Fprintf(w, "%v => %v\n", mod, s.State())
+		}
+	}
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go
new file mode 100644
index 000000000000..b9d101be932b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go
@@ -0,0 +1,93 @@
+package flusher
+
+import (
+	"context"
+	"flag"
+	"time"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/cortexproject/cortex/pkg/ingester"
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+// Config for a Flusher.
+type Config struct {
+	WALDir            string        `yaml:"wal_dir"`
+	ConcurrentFlushes int           `yaml:"concurrent_flushes"`
+	FlushOpTimeout    time.Duration `yaml:"flush_op_timeout"`
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	f.StringVar(&cfg.WALDir, "flusher.wal-dir", "wal", "Directory to read WAL from.")
+	f.IntVar(&cfg.ConcurrentFlushes, "flusher.concurrent-flushes", 50, "Number of concurrent goroutines flushing to dynamodb.")
+	f.DurationVar(&cfg.FlushOpTimeout, "flusher.flush-op-timeout", 2*time.Minute, "Timeout for individual flush operations.")
+}
+
+// Flusher is designed to be used as a job to flush the chunks from the WAL on disk.
+type Flusher struct {
+	services.Service
+
+	cfg            Config
+	ingesterConfig ingester.Config
+	clientConfig   client.Config
+	chunkStore     ingester.ChunkStore
+	registerer     prometheus.Registerer
+}
+
+const (
+	postFlushSleepTime = 1 * time.Minute
+)
+
+// New constructs a new Flusher and flushes the data from the WAL.
+// The returned Flusher has no other operations.
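+//
+// Lifecycle sketch (informal): running() below replays the WAL through a
+// throwaway ingester, flushes all of its chunks, sleeps so the flush metrics
+// can still be scraped, and then returns util.ErrStopProcess to shut the
+// whole process down.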
+func New(
+	cfg Config,
+	ingesterConfig ingester.Config,
+	clientConfig client.Config,
+	chunkStore ingester.ChunkStore,
+	registerer prometheus.Registerer,
+) (*Flusher, error) {
+
+	ingesterConfig.WALConfig.Dir = cfg.WALDir
+	ingesterConfig.ConcurrentFlushes = cfg.ConcurrentFlushes
+	ingesterConfig.FlushOpTimeout = cfg.FlushOpTimeout
+
+	f := &Flusher{
+		cfg:            cfg,
+		ingesterConfig: ingesterConfig,
+		clientConfig:   clientConfig,
+		chunkStore:     chunkStore,
+		registerer:     registerer,
+	}
+	f.Service = services.NewBasicService(nil, f.running, nil)
+	return f, nil
+}
+
+func (f *Flusher) running(ctx context.Context) error {
+	ing, err := ingester.NewForFlusher(f.ingesterConfig, f.clientConfig, f.chunkStore, f.registerer)
+	if err != nil {
+		return errors.Wrap(err, "create ingester")
+	}
+
+	if err := services.StartAndAwaitRunning(ctx, ing); err != nil {
+		return errors.Wrap(err, "start and await running ingester")
+	}
+
+	ing.Flush()
+
+	// Sleep to give Prometheus a chance to collect the metrics.
+	level.Info(util.Logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String())
+	time.Sleep(postFlushSleepTime)
+
+	if err := services.StopAndAwaitTerminated(ctx, ing); err != nil {
+		return errors.Wrap(err, "stop and await terminated ingester")
+	}
+	return util.ErrStopProcess
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go
new file mode 100644
index 000000000000..c4c357e24060
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/errors.go
@@ -0,0 +1,73 @@
+package ingester
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/weaveworks/common/httpgrpc"
+)
+
+type validationError struct {
+	err       error // underlying error
+	errorType string
+	code      int
+	noReport  bool // if true, error will be counted but not reported to caller
+	labels    labels.Labels
+}
+
+func makeLimitError(errorType string, err error) error {
+	return &validationError{
+		errorType: errorType,
+		err:       err,
+		code:      http.StatusTooManyRequests,
+	}
+}
+
+func makeNoReportError(errorType string) error {
+	return &validationError{
+		errorType: errorType,
+		noReport:  true,
+	}
+}
+
+func makeMetricValidationError(errorType string, labels labels.Labels, err error) error {
+	return &validationError{
+		errorType: errorType,
+		err:       err,
+		code:      http.StatusBadRequest,
+		labels:    labels,
+	}
+}
+
+func makeMetricLimitError(errorType string, labels labels.Labels, err error) error {
+	return &validationError{
+		errorType: errorType,
+		err:       err,
+		code:      http.StatusTooManyRequests,
+		labels:    labels,
+	}
+}
+
+func (e *validationError) Error() string {
+	if e.err == nil {
+		return e.errorType
+	}
+	if e.labels == nil {
+		return e.err.Error()
+	}
+	return fmt.Sprintf("%s for series %s", e.err.Error(), e.labels.String())
+}
+
+// grpcForwardableError returns an HTTP gRPC error that is correctly forwarded over gRPC, with no reference to `e` retained.
+func grpcForwardableError(userID string, code int, e error) error { + return httpgrpc.ErrorFromHTTPResponse(&httpgrpc.HTTPResponse{ + Code: int32(code), + Body: []byte(wrapWithUser(e, userID).Error()), + }) +} + +// Note: does not retain a reference to `err` +func wrapWithUser(err error, userID string) error { + return fmt.Errorf("user=%s: %s", userID, err) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go new file mode 100644 index 000000000000..46de627fd76d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go @@ -0,0 +1,357 @@ +package ingester + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/go-kit/kit/log/level" + ot "github.com/opentracing/opentracing-go" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // Backoff for retrying 'immediate' flushes. Only counts for queue + // position, not wallclock time. + flushBackoff = 1 * time.Second +) + +// Flush triggers a flush of all the chunks and closes the flush queues. +// Called from the Lifecycler as part of the ingester shutdown. +func (i *Ingester) Flush() { + level.Info(util.Logger).Log("msg", "starting to flush all the chunks") + i.sweepUsers(true) + level.Info(util.Logger).Log("msg", "flushing of chunks complete") + + // Close the flush queues, to unblock waiting workers. + for _, flushQueue := range i.flushQueues { + flushQueue.Close() + } + + i.flushQueuesDone.Wait() +} + +// FlushHandler triggers a flush of all in memory chunks. Mainly used for +// local testing. +func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) { + level.Info(util.Logger).Log("msg", "starting to flush all the chunks") + i.sweepUsers(true) + level.Info(util.Logger).Log("msg", "flushing of chunks complete") + w.WriteHeader(http.StatusNoContent) +} + +type flushOp struct { + from model.Time + userID string + fp model.Fingerprint + immediate bool +} + +func (o *flushOp) Key() string { + return fmt.Sprintf("%s-%d-%v", o.userID, o.fp, o.immediate) +} + +func (o *flushOp) Priority() int64 { + return -int64(o.from) +} + +// sweepUsers periodically schedules series for flushing and garbage collects users with no series +func (i *Ingester) sweepUsers(immediate bool) { + if i.chunkStore == nil { + return + } + + oldest := model.Time(0) + + for id, state := range i.userStates.cp() { + for pair := range state.fpToSeries.iter() { + state.fpLocker.Lock(pair.fp) + i.sweepSeries(id, pair.fp, pair.series, immediate) + i.removeFlushedChunks(state, pair.fp, pair.series) + first := pair.series.firstUnflushedChunkTime() + state.fpLocker.Unlock(pair.fp) + + if first > 0 && (oldest == 0 || first < oldest) { + oldest = first + } + } + } + + i.metrics.oldestUnflushedChunkTimestamp.Set(float64(oldest.Unix())) +} + +type flushReason int8 + +const ( + noFlush = iota + reasonImmediate + reasonMultipleChunksInSeries + reasonAged + reasonIdle + reasonStale + reasonSpreadFlush +) + +func (f flushReason) String() string { + switch f { + case noFlush: + return "NoFlush" + case reasonImmediate: + return "Immediate" + case reasonMultipleChunksInSeries: + return "MultipleChunksInSeries" + case reasonAged: + return "Aged" + case reasonIdle: + return "Idle" + case reasonStale: + return "Stale" + case reasonSpreadFlush: + return "Spread" + default: + panic("unrecognised flushReason") + } +} + +// 
sweepSeries schedules a series for flushing based on a set of criteria
+//
+// NB we don't close the head chunk here, as the series could wait in the queue
+// for some time, and we want to encourage chunks to be as full as possible.
+func (i *Ingester) sweepSeries(userID string, fp model.Fingerprint, series *memorySeries, immediate bool) {
+	if len(series.chunkDescs) <= 0 {
+		return
+	}
+
+	firstTime := series.firstTime()
+	flush := i.shouldFlushSeries(series, fp, immediate)
+	if flush == noFlush {
+		return
+	}
+
+	flushQueueIndex := int(uint64(fp) % uint64(i.cfg.ConcurrentFlushes))
+	if i.flushQueues[flushQueueIndex].Enqueue(&flushOp{firstTime, userID, fp, immediate}) {
+		i.metrics.flushReasons.WithLabelValues(flush.String()).Inc()
+		util.Event().Log("msg", "add to flush queue", "userID", userID, "reason", flush, "firstTime", firstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex)
+	}
+}
+
+func (i *Ingester) shouldFlushSeries(series *memorySeries, fp model.Fingerprint, immediate bool) flushReason {
+	if len(series.chunkDescs) == 0 {
+		return noFlush
+	}
+	if immediate {
+		return reasonImmediate
+	}
+
+	// Flush if we have more than one chunk, and haven't already flushed the first chunk
+	if len(series.chunkDescs) > 1 && !series.chunkDescs[0].flushed {
+		if series.chunkDescs[0].flushReason != noFlush {
+			return series.chunkDescs[0].flushReason
+		}
+		return reasonMultipleChunksInSeries
+	}
+	// Otherwise look in more detail at the first chunk
+	return i.shouldFlushChunk(series.chunkDescs[0], fp, series.isStale())
+}
+
+func (i *Ingester) shouldFlushChunk(c *desc, fp model.Fingerprint, lastValueIsStale bool) flushReason {
+	if c.flushed { // don't flush chunks we've already flushed
+		return noFlush
+	}
+
+	// Adjust max age slightly to spread flushes out over time
+	var jitter time.Duration
+	if i.cfg.ChunkAgeJitter != 0 {
+		jitter = time.Duration(fp) % i.cfg.ChunkAgeJitter
+	}
+	// Chunks should be flushed if they span longer than MaxChunkAge
+	if c.LastTime.Sub(c.FirstTime) > (i.cfg.MaxChunkAge - jitter) {
+		return reasonAged
+	}
+
+	// A chunk should be flushed if its last update is older than MaxChunkIdle.
+	if model.Now().Sub(c.LastUpdate) > i.cfg.MaxChunkIdle {
+		return reasonIdle
+	}
+
+	// A chunk that has a stale marker can be flushed if possible.
+	if i.cfg.MaxStaleChunkIdle > 0 &&
+		lastValueIsStale &&
+		model.Now().Sub(c.LastUpdate) > i.cfg.MaxStaleChunkIdle {
+		return reasonStale
+	}
+
+	return noFlush
+}
+
+func (i *Ingester) flushLoop(j int) {
+	defer func() {
+		level.Debug(util.Logger).Log("msg", "Ingester.flushLoop() exited")
+		i.flushQueuesDone.Done()
+	}()
+
+	for {
+		o := i.flushQueues[j].Dequeue()
+		if o == nil {
+			return
+		}
+		op := o.(*flushOp)
+
+		err := i.flushUserSeries(j, op.userID, op.fp, op.immediate)
+		if err != nil {
+			level.Error(util.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err)
+		}
+
+		// If we're exiting & we failed to flush, put the failed operation
+		// back in the queue at a later point.
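(The requeue branch described by the comment above continues right below.) As a side note on shouldFlushChunk: the jitter term is derived deterministically from the series fingerprint, giving each series a stable effective max age. A standalone sketch of the arithmetic, with made-up values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values: a series fingerprint and the configured knobs.
	fp := uint64(823467) // stands in for model.Fingerprint
	maxChunkAge := 12 * time.Hour
	chunkAgeJitter := 20 * time.Minute

	// Same arithmetic as shouldFlushChunk: the fingerprint picks a fixed
	// offset in [0, ChunkAgeJitter), so flushes of different series spread
	// out instead of all firing at exactly MaxChunkAge.
	jitter := time.Duration(fp) % chunkAgeJitter
	effectiveMaxAge := maxChunkAge - jitter

	fmt.Printf("series flushes as Aged after %s (jitter %s)\n", effectiveMaxAge, jitter)
}
```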
+ if op.immediate && err != nil { + op.from = op.from.Add(flushBackoff) + i.flushQueues[j].Enqueue(op) + } + } +} + +func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) error { + if i.preFlushUserSeries != nil { + i.preFlushUserSeries() + } + + userState, ok := i.userStates.get(userID) + if !ok { + return nil + } + + series, ok := userState.fpToSeries.get(fp) + if !ok { + return nil + } + + userState.fpLocker.Lock(fp) + reason := i.shouldFlushSeries(series, fp, immediate) + if reason == noFlush { + userState.fpLocker.Unlock(fp) + return nil + } + + // shouldFlushSeries() has told us we have at least one chunk + chunks := series.chunkDescs + if immediate { + series.closeHead(reasonImmediate) + } else if chunkReason := i.shouldFlushChunk(series.head(), fp, series.isStale()); chunkReason != noFlush { + series.closeHead(chunkReason) + } else { + // The head chunk doesn't need flushing; step back by one. + chunks = chunks[:len(chunks)-1] + } + + if (reason == reasonIdle || reason == reasonStale) && series.headChunkClosed { + if minChunkLength := i.limits.MinChunkLength(userID); minChunkLength > 0 { + chunkLength := 0 + for _, c := range chunks { + chunkLength += c.C.Len() + } + if chunkLength < minChunkLength { + userState.removeSeries(fp, series.metric) + i.metrics.memoryChunks.Sub(float64(len(chunks))) + i.metrics.droppedChunks.Add(float64(len(chunks))) + util.Event().Log( + "msg", "dropped chunks", + "userID", userID, + "numChunks", len(chunks), + "chunkLength", chunkLength, + "fp", fp, + "series", series.metric, + "queue", flushQueueIndex, + ) + chunks = nil + } + } + } + + userState.fpLocker.Unlock(fp) + + if len(chunks) == 0 { + return nil + } + + // flush the chunks without locking the series, as we don't want to hold the series lock for the duration of the dynamo/s3 rpcs. 
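The comment above introduces what the next hunk does: bound the store write with a timeout and a tracing span so the series lock is never held across slow RPCs. A minimal, self-contained sketch of that pattern, assuming a placeholder store call:

```go
package main

import (
	"context"
	"fmt"
	"time"

	ot "github.com/opentracing/opentracing-go"
)

// fakeStorePut stands in for the chunk store RPC; it just honours ctx.
func fakeStorePut(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Bound the whole flush with a FlushOpTimeout-style deadline...
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel() // release resources if the put finishes early

	// ...and wrap it in a span so slow flushes show up in traces.
	sp, ctx := ot.StartSpanFromContext(ctx, "flushUserSeries")
	defer sp.Finish()
	sp.SetTag("organization", "fake-user") // hypothetical tenant ID

	fmt.Println("put:", fakeStorePut(ctx))
}
```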
+	ctx, cancel := context.WithTimeout(context.Background(), i.cfg.FlushOpTimeout)
+	defer cancel() // releases resources if slowOperation completes before timeout elapses
+
+	sp, ctx := ot.StartSpanFromContext(ctx, "flushUserSeries")
+	defer sp.Finish()
+	sp.SetTag("organization", userID)
+
+	util.Event().Log("msg", "flush chunks", "userID", userID, "reason", reason, "numChunks", len(chunks), "firstTime", chunks[0].FirstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex)
+	err := i.flushChunks(ctx, userID, fp, series.metric, chunks)
+	if err != nil {
+		return err
+	}
+
+	userState.fpLocker.Lock(fp)
+	if immediate {
+		userState.removeSeries(fp, series.metric)
+		i.metrics.memoryChunks.Sub(float64(len(chunks)))
+	} else {
+		for i := 0; i < len(chunks); i++ {
+			// mark the chunks as flushed, so we can remove them after the retention period
+			series.chunkDescs[i].flushed = true
+			series.chunkDescs[i].LastUpdate = model.Now()
+		}
+	}
+	userState.fpLocker.Unlock(fp)
+	return nil
+}
+
+// must be called under fpLocker lock
+func (i *Ingester) removeFlushedChunks(userState *userState, fp model.Fingerprint, series *memorySeries) {
+	now := model.Now()
+	for len(series.chunkDescs) > 0 {
+		if series.chunkDescs[0].flushed && now.Sub(series.chunkDescs[0].LastUpdate) > i.cfg.RetainPeriod {
+			series.chunkDescs[0] = nil // erase reference so the chunk can be garbage-collected
+			series.chunkDescs = series.chunkDescs[1:]
+			i.metrics.memoryChunks.Dec()
+		} else {
+			break
+		}
+	}
+	if len(series.chunkDescs) == 0 {
+		userState.removeSeries(fp, series.metric)
+	}
+}
+
+func (i *Ingester) flushChunks(ctx context.Context, userID string, fp model.Fingerprint, metric labels.Labels, chunkDescs []*desc) error {
+	wireChunks := make([]chunk.Chunk, 0, len(chunkDescs))
+	for _, chunkDesc := range chunkDescs {
+		c := chunk.NewChunk(userID, fp, metric, chunkDesc.C, chunkDesc.FirstTime, chunkDesc.LastTime)
+		if err := c.Encode(); err != nil {
+			return err
+		}
+		wireChunks = append(wireChunks, c)
+	}
+
+	if err := i.chunkStore.Put(ctx, wireChunks); err != nil {
+		return err
+	}
+
+	sizePerUser := i.metrics.chunkSizePerUser.WithLabelValues(userID)
+	countPerUser := i.metrics.chunksPerUser.WithLabelValues(userID)
+	// Record statistics only when the actual put request did not return an error.
+	for _, chunkDesc := range chunkDescs {
+		utilization, length, size := chunkDesc.C.Utilization(), chunkDesc.C.Len(), chunkDesc.C.Size()
+		util.Event().Log("msg", "chunk flushed", "userID", userID, "fp", fp, "series", metric, "nlabels", len(metric), "utilization", utilization, "length", length, "size", size, "firstTime", chunkDesc.FirstTime, "lastTime", chunkDesc.LastTime)
+		i.metrics.chunkUtilization.Observe(utilization)
+		i.metrics.chunkLength.Observe(float64(length))
+		i.metrics.chunkSize.Observe(float64(size))
+		sizePerUser.Add(float64(size))
+		countPerUser.Inc()
+		i.metrics.chunkAge.Observe(model.Now().Sub(chunkDesc.FirstTime).Seconds())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
new file mode 100644
index 000000000000..6fa1e0fa7fde
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
@@ -0,0 +1,811 @@
+package ingester
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/gogo/status"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/weaveworks/common/httpgrpc"
+	"github.com/weaveworks/common/user"
+	"google.golang.org/grpc/codes"
+
+	cortex_chunk "github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/ring"
+	"github.com/cortexproject/cortex/pkg/storage/tsdb"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/cortexproject/cortex/pkg/util/spanlogger"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+)
+
+const (
+	// Number of timeseries to return in each batch of a QueryStream.
+	queryStreamBatchSize = 128
+)
+
+var (
+	// This pool is initialised only if the WAL is enabled; WAL records are fetched from it.
+	recordPool sync.Pool
+)
+
+// Config for an Ingester.
+type Config struct {
+	WALConfig        WALConfig             `yaml:"walconfig"`
+	LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler"`
+
+	// Config for transferring chunks. Zero or negative = no retries.
+	MaxTransferRetries int `yaml:"max_transfer_retries"`
+
+	// Config for chunk flushing.
+	FlushCheckPeriod  time.Duration `yaml:"flush_period"`
+	RetainPeriod      time.Duration `yaml:"retain_period"`
+	MaxChunkIdle      time.Duration `yaml:"max_chunk_idle_time"`
+	MaxStaleChunkIdle time.Duration `yaml:"max_stale_chunk_idle_time"`
+	FlushOpTimeout    time.Duration `yaml:"flush_op_timeout"`
+	MaxChunkAge       time.Duration `yaml:"max_chunk_age"`
+	ChunkAgeJitter    time.Duration `yaml:"chunk_age_jitter"`
+	ConcurrentFlushes int           `yaml:"concurrent_flushes"`
+	SpreadFlushes     bool          `yaml:"spread_flushes"`
+
+	RateUpdatePeriod time.Duration `yaml:"rate_update_period"`
+
+	// Use tsdb block storage
+	TSDBEnabled bool        `yaml:"-"`
+	TSDBConfig  tsdb.Config `yaml:"-"`
+
+	// Injected at runtime and read from the distributor config, required
+	// to accurately apply global limits.
+	ShardByAllLabels bool `yaml:"-"`
+
+	// For testing, you can override the address and ID of this ingester.
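(The overridable client factory field follows this aside.) For orientation: the Config above is populated from command-line flags by RegisterFlags, shown in the next hunk. A hedged sketch of how a caller might wire a couple of these flags up; the miniature config type is invented for illustration:

```go
package main

import (
	"flag"
	"fmt"
	"time"
)

// miniConfig mirrors a few of the ingester Config fields for illustration;
// the real type carries many more knobs.
type miniConfig struct {
	FlushCheckPeriod time.Duration
	MaxChunkAge      time.Duration
}

func (c *miniConfig) RegisterFlags(f *flag.FlagSet) {
	f.DurationVar(&c.FlushCheckPeriod, "ingester.flush-period", time.Minute, "Period with which to attempt to flush chunks.")
	f.DurationVar(&c.MaxChunkAge, "ingester.max-chunk-age", 12*time.Hour, "Maximum chunk age before flushing.")
}

func main() {
	var cfg miniConfig
	fs := flag.NewFlagSet("ingester", flag.ExitOnError)
	cfg.RegisterFlags(fs)

	// e.g. loki -ingester.max-chunk-age=6h
	_ = fs.Parse([]string{"-ingester.max-chunk-age", "6h"})
	fmt.Println(cfg.FlushCheckPeriod, cfg.MaxChunkAge)
}
```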
+ ingesterClientFactory func(addr string, cfg client.Config) (client.HealthAndIngesterClient, error) +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.LifecyclerConfig.RegisterFlags(f) + cfg.WALConfig.RegisterFlags(f) + + f.IntVar(&cfg.MaxTransferRetries, "ingester.max-transfer-retries", 10, "Number of times to try and transfer chunks before falling back to flushing. Negative value or zero disables hand-over.") + f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-period", 1*time.Minute, "Period with which to attempt to flush chunks.") + f.DurationVar(&cfg.RetainPeriod, "ingester.retain-period", 5*time.Minute, "Period chunks will remain in memory after flushing.") + f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 1*time.Minute, "Timeout for individual flush operations.") + f.DurationVar(&cfg.MaxChunkIdle, "ingester.max-chunk-idle", 5*time.Minute, "Maximum chunk idle time before flushing.") + f.DurationVar(&cfg.MaxStaleChunkIdle, "ingester.max-stale-chunk-idle", 2*time.Minute, "Maximum chunk idle time for chunks terminating in stale markers before flushing. 0 disables it and a stale series is not flushed until the max-chunk-idle timeout is reached.") + f.DurationVar(&cfg.MaxChunkAge, "ingester.max-chunk-age", 12*time.Hour, "Maximum chunk age before flushing.") + f.DurationVar(&cfg.ChunkAgeJitter, "ingester.chunk-age-jitter", 0, "Range of time to subtract from -ingester.max-chunk-age to spread out flushes") + f.BoolVar(&cfg.SpreadFlushes, "ingester.spread-flushes", true, "If true, spread series flushes across the whole period of -ingester.max-chunk-age.") + f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 50, "Number of concurrent goroutines flushing to dynamodb.") + f.DurationVar(&cfg.RateUpdatePeriod, "ingester.rate-update-period", 15*time.Second, "Period with which to update the per-user ingestion rates.") +} + +// Ingester deals with "in flight" chunks. Based on Prometheus 1.x +// MemorySeriesStorage. +type Ingester struct { + services.Service + + cfg Config + clientConfig client.Config + + metrics *ingesterMetrics + + chunkStore ChunkStore + lifecycler *ring.Lifecycler + limits *validation.Overrides + limiter *Limiter + subservicesWatcher *services.FailureWatcher + + userStatesMtx sync.RWMutex // protects userStates and stopped + userStates *userStates + stopped bool // protected by userStatesMtx + + // One queue per flush thread. Fingerprint is used to + // pick a queue. + flushQueues []*util.PriorityQueue + flushQueuesDone sync.WaitGroup + + // This should never be nil. + wal WAL + // To be passed to the WAL. + registerer prometheus.Registerer + + // Hook for injecting behaviour from tests. + preFlushUserSeries func() + + // Prometheus block storage + TSDBState TSDBState +} + +// ChunkStore is the interface we need to store chunks +type ChunkStore interface { + Put(ctx context.Context, chunks []cortex_chunk.Chunk) error +} + +// New constructs a new Ingester. +func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { + if cfg.ingesterClientFactory == nil { + cfg.ingesterClientFactory = client.MakeIngesterClient + } + + if cfg.TSDBEnabled { + return NewV2(cfg, clientConfig, limits, registerer) + } + + if cfg.WALConfig.WALEnabled { + // If WAL is enabled, we don't transfer out the data to any ingester. 
+	// Either the next ingester which takes its place should recover from WAL
+	// or the data has to be flushed during scaledown.
+	cfg.MaxTransferRetries = 0
+
+	// Transfers are disabled with WAL, hence no need to wait for transfers.
+	cfg.LifecyclerConfig.JoinAfter = 0
+
+		recordPool = sync.Pool{
+			New: func() interface{} {
+				return &Record{}
+			},
+		}
+	}
+
+	i := &Ingester{
+		cfg:          cfg,
+		clientConfig: clientConfig,
+		metrics:      newIngesterMetrics(registerer, true),
+		limits:       limits,
+		chunkStore:   chunkStore,
+		flushQueues:  make([]*util.PriorityQueue, cfg.ConcurrentFlushes),
+		registerer:   registerer,
+	}
+
+	var err error
+	// During WAL recovery, it will create new user states which requires the limiter.
+	// Hence initialise the limiter before creating the WAL.
+	// The '!cfg.WALConfig.WALEnabled' argument says don't flush on shutdown if the WAL is enabled.
+	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, !cfg.WALConfig.WALEnabled)
+	if err != nil {
+		return nil, err
+	}
+	i.limiter = NewLimiter(limits, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor, cfg.ShardByAllLabels)
+	i.subservicesWatcher = services.NewFailureWatcher()
+	i.subservicesWatcher.WatchService(i.lifecycler)
+
+	i.Service = services.NewBasicService(i.starting, i.loop, i.stopping)
+	return i, nil
+}
+
+func (i *Ingester) starting(ctx context.Context) error {
+	if i.cfg.WALConfig.Recover {
+		level.Info(util.Logger).Log("msg", "recovering from WAL")
+		start := time.Now()
+		if err := recoverFromWAL(i); err != nil {
+			level.Error(util.Logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String())
+			return errors.Wrap(err, "failed to recover from WAL")
+		}
+		elapsed := time.Since(start)
+		level.Info(util.Logger).Log("msg", "recovery from WAL completed", "time", elapsed.String())
+		i.metrics.walReplayDuration.Set(elapsed.Seconds())
+	}
+
+	// If WAL recovery happened, then the userStates would already be set.
+	if i.userStates == nil {
+		i.userStates = newUserStates(i.limiter, i.cfg, i.metrics)
+	}
+
+	var err error
+	i.wal, err = newWAL(i.cfg.WALConfig, i.userStates.cp, i.registerer)
+	if err != nil {
+		return errors.Wrap(err, "starting WAL")
+	}
+
+	// Now that user states have been created, we can start the lifecycler.
+	// Important: we want to keep the lifecycler running until we ask it to stop, so we need to give it an independent context
+	if err := i.lifecycler.StartAsync(context.Background()); err != nil {
+		return errors.Wrap(err, "failed to start lifecycler")
+	}
+	if err := i.lifecycler.AwaitRunning(ctx); err != nil {
+		return errors.Wrap(err, "failed to start lifecycler")
+	}
+
+	i.startFlushLoops()
+
+	return nil
+}
+
+func (i *Ingester) startFlushLoops() {
+	i.flushQueuesDone.Add(i.cfg.ConcurrentFlushes)
+	for j := 0; j < i.cfg.ConcurrentFlushes; j++ {
+		i.flushQueues[j] = util.NewPriorityQueue(i.metrics.flushQueueLength)
+		go i.flushLoop(j)
+	}
+}
+
+// NewForFlusher constructs a new Ingester to be used by the flusher target.
+// Compared to the 'New' method:
+// * Always replays the WAL.
+// * Does not start the lifecycler.
+// * No ingester v2.
+func NewForFlusher(cfg Config, clientConfig client.Config, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { + if cfg.ingesterClientFactory == nil { + cfg.ingesterClientFactory = client.MakeIngesterClient + } + + i := &Ingester{ + cfg: cfg, + clientConfig: clientConfig, + metrics: newIngesterMetrics(registerer, true), + chunkStore: chunkStore, + flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes), + wal: &noopWAL{}, + } + + i.Service = services.NewBasicService(i.startingForFlusher, i.loop, i.stopping) + return i, nil +} + +func (i *Ingester) startingForFlusher(ctx context.Context) error { + level.Info(util.Logger).Log("msg", "recovering from WAL") + + // We recover from WAL always. + start := time.Now() + if err := recoverFromWAL(i); err != nil { + level.Error(util.Logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) + return err + } + elapsed := time.Since(start) + + level.Info(util.Logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) + i.metrics.walReplayDuration.Set(elapsed.Seconds()) + + i.startFlushLoops() + return nil +} + +func (i *Ingester) loop(ctx context.Context) error { + flushTicker := time.NewTicker(i.cfg.FlushCheckPeriod) + defer flushTicker.Stop() + + rateUpdateTicker := time.NewTicker(i.cfg.RateUpdatePeriod) + defer rateUpdateTicker.Stop() + + for { + select { + case <-flushTicker.C: + i.sweepUsers(false) + + case <-rateUpdateTicker.C: + i.userStates.updateRates() + + case <-ctx.Done(): + return nil + + case err := <-i.subservicesWatcher.Chan(): + return errors.Wrap(err, "ingester subservice failed") + } + } +} + +// stopping is run when ingester is asked to stop +func (i *Ingester) stopping(_ error) error { + i.wal.Stop() + + // This will prevent us accepting any more samples + i.stopIncomingRequests() + + // Lifecycler can be nil if the ingester is for a flusher. + if i.lifecycler != nil { + // Next initiate our graceful exit from the ring. + return services.StopAndAwaitTerminated(context.Background(), i.lifecycler) + } + + return nil +} + +// ShutdownHandler triggers the following set of operations in order: +// * Change the state of ring to stop accepting writes. +// * Flush all the chunks. +func (i *Ingester) ShutdownHandler(w http.ResponseWriter, r *http.Request) { + originalState := i.lifecycler.FlushOnShutdown() + // We want to flush the chunks if transfer fails irrespective of original flag. + i.lifecycler.SetFlushOnShutdown(true) + _ = services.StopAndAwaitTerminated(context.Background(), i) + i.lifecycler.SetFlushOnShutdown(originalState) + w.WriteHeader(http.StatusNoContent) +} + +// stopIncomingRequests is called during the shutdown process. +func (i *Ingester) stopIncomingRequests() { + i.userStatesMtx.Lock() + defer i.userStatesMtx.Unlock() + i.stopped = true +} + +// check that ingester has finished starting, i.e. it is in Running or Stopping state. +// Why Stopping? Because ingester still runs, even when it is transferring data out in Stopping state. +// Ingester handles this state on its own (via `stopped` flag). 
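An aside on ShutdownHandler above: it is an ordinary http.HandlerFunc, so it can be mounted on any mux. A minimal sketch of such wiring; the route and address are invented:

```go
package main

import (
	"log"
	"net/http"
)

// ingesterLike captures the only method this sketch needs from the Ingester.
type ingesterLike interface {
	ShutdownHandler(w http.ResponseWriter, r *http.Request)
}

func mountAdminRoutes(mux *http.ServeMux, ing ingesterLike) {
	// Hitting this hypothetical endpoint flushes all chunks and leaves the ring.
	mux.HandleFunc("/ingester/shutdown", ing.ShutdownHandler)
}

func main() {
	mux := http.NewServeMux()
	// mountAdminRoutes(mux, ing) // ing would be the *Ingester from New(...)
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```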
+func (i *Ingester) checkRunningOrStopping() error { + s := i.State() + if s == services.Running || s == services.Stopping { + return nil + } + return status.Error(codes.Unavailable, s.String()) +} + +// Push implements client.IngesterServer +func (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2Push(ctx, req) + } + + // NOTE: because we use `unsafe` in deserialisation, we must not + // retain anything from `req` past the call to ReuseSlice + defer client.ReuseSlice(req.Timeseries) + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, fmt.Errorf("no user id") + } + + var firstPartialErr *validationError + var record *Record + if i.cfg.WALConfig.WALEnabled { + record = recordPool.Get().(*Record) + record.UserId = userID + // Assuming there is not much churn in most cases, there is no use + // keeping the record.Labels slice hanging around. + record.Labels = nil + if cap(record.Samples) < len(req.Timeseries) { + record.Samples = make([]Sample, 0, len(req.Timeseries)) + } else { + record.Samples = record.Samples[:0] + } + } + + if len(req.Metadata) > 0 { + logger := util.WithContext(ctx, util.Logger) + level.Debug(logger).Log("msg", "metadata received in the ingester", "count", len(req.Metadata)) + } + + for _, ts := range req.Timeseries { + for _, s := range ts.Samples { + // append() copies the memory in `ts.Labels` except on the error path + err := i.append(ctx, userID, ts.Labels, model.Time(s.TimestampMs), model.SampleValue(s.Value), req.Source, record) + if err == nil { + continue + } + + i.metrics.ingestedSamplesFail.Inc() + if ve, ok := err.(*validationError); ok { + if firstPartialErr == nil { + firstPartialErr = ve + } + continue + } + + // non-validation error: abandon this request + return nil, grpcForwardableError(userID, http.StatusInternalServerError, err) + } + } + + if firstPartialErr != nil { + // grpcForwardableError turns the error into a string so it no longer references `req` + return &client.WriteResponse{}, grpcForwardableError(userID, firstPartialErr.code, firstPartialErr) + } + + if record != nil { + // Log the record only if there was no error in ingestion. + if err := i.wal.Log(record); err != nil { + return nil, err + } + recordPool.Put(record) + } + + return &client.WriteResponse{}, nil +} + +// NOTE: memory for `labels` is unsafe; anything retained beyond the +// life of this function must be copied +func (i *Ingester) append(ctx context.Context, userID string, labels labelPairs, timestamp model.Time, value model.SampleValue, source client.WriteRequest_SourceEnum, record *Record) error { + labels.removeBlanks() + + var ( + state *userState + fp model.Fingerprint + ) + i.userStatesMtx.RLock() + defer func() { + i.userStatesMtx.RUnlock() + if state != nil { + state.fpLocker.Unlock(fp) + } + }() + if i.stopped { + return fmt.Errorf("ingester stopping") + } + + // getOrCreateSeries copies the memory for `labels`, except on the error path. + state, fp, series, err := i.userStates.getOrCreateSeries(ctx, userID, labels, record) + if err != nil { + if ve, ok := err.(*validationError); ok { + state.discardedSamples.WithLabelValues(ve.errorType).Inc() + } + + // Reset the state so that the defer will not try to unlock the fpLocker + // in case of error, because that lock has already been released on error. 
+ state = nil + return err + } + + prevNumChunks := len(series.chunkDescs) + if i.cfg.SpreadFlushes && prevNumChunks > 0 { + // Map from the fingerprint hash to a point in the cycle of period MaxChunkAge + startOfCycle := timestamp.Add(-(timestamp.Sub(model.Time(0)) % i.cfg.MaxChunkAge)) + slot := startOfCycle.Add(time.Duration(uint64(fp) % uint64(i.cfg.MaxChunkAge))) + // If adding this sample means the head chunk will span that point in time, close so it will get flushed + if series.head().FirstTime < slot && timestamp >= slot { + series.closeHead(reasonSpreadFlush) + } + } + + if err := series.add(model.SamplePair{ + Value: value, + Timestamp: timestamp, + }); err != nil { + if ve, ok := err.(*validationError); ok { + state.discardedSamples.WithLabelValues(ve.errorType).Inc() + if ve.noReport { + return nil + } + } + return err + } + + if record != nil { + record.Samples = append(record.Samples, Sample{ + Fingerprint: uint64(fp), + Timestamp: uint64(timestamp), + Value: float64(value), + }) + } + + i.metrics.memoryChunks.Add(float64(len(series.chunkDescs) - prevNumChunks)) + i.metrics.ingestedSamples.Inc() + switch source { + case client.RULE: + state.ingestedRuleSamples.inc() + case client.API: + fallthrough + default: + state.ingestedAPISamples.inc() + } + + return err +} + +// Query implements service.IngesterServer +func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client.QueryResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2Query(ctx, req) + } + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + from, through, matchers, err := client.FromQueryRequest(req) + if err != nil { + return nil, err + } + + i.metrics.queries.Inc() + + i.userStatesMtx.RLock() + state, ok, err := i.userStates.getViaContext(ctx) + i.userStatesMtx.RUnlock() + if err != nil { + return nil, err + } else if !ok { + return &client.QueryResponse{}, nil + } + + result := &client.QueryResponse{} + numSeries, numSamples := 0, 0 + maxSamplesPerQuery := i.limits.MaxSamplesPerQuery(userID) + err = state.forSeriesMatching(ctx, matchers, func(ctx context.Context, _ model.Fingerprint, series *memorySeries) error { + values, err := series.samplesForRange(from, through) + if err != nil { + return err + } + if len(values) == 0 { + return nil + } + numSeries++ + + numSamples += len(values) + if numSamples > maxSamplesPerQuery { + return httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "exceeded maximum number of samples in a query (%d)", maxSamplesPerQuery) + } + + ts := client.TimeSeries{ + Labels: client.FromLabelsToLabelAdapters(series.metric), + Samples: make([]client.Sample, 0, len(values)), + } + for _, s := range values { + ts.Samples = append(ts.Samples, client.Sample{ + Value: float64(s.Value), + TimestampMs: int64(s.Timestamp), + }) + } + result.Timeseries = append(result.Timeseries, ts) + return nil + }, nil, 0) + i.metrics.queriedSeries.Observe(float64(numSeries)) + i.metrics.queriedSamples.Observe(float64(numSamples)) + return result, err +} + +// QueryStream implements service.IngesterServer +func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) error { + if err := i.checkRunningOrStopping(); err != nil { + return err + } + + if i.cfg.TSDBEnabled { + return i.v2QueryStream(req, stream) + } + + log, ctx := spanlogger.New(stream.Context(), "QueryStream") + + from, through, matchers, err := client.FromQueryRequest(req) + if err != 
nil { + return err + } + + i.metrics.queries.Inc() + + i.userStatesMtx.RLock() + state, ok, err := i.userStates.getViaContext(ctx) + i.userStatesMtx.RUnlock() + if err != nil { + return err + } else if !ok { + return nil + } + + numSeries, numChunks := 0, 0 + batch := make([]client.TimeSeriesChunk, 0, queryStreamBatchSize) + // We'd really like to have series in label order, not FP order, so we + // can iteratively merge them with entries coming from the chunk store. But + // that would involve locking all the series & sorting, so until we have + // a better solution in the ingesters I'd rather take the hit in the queriers. + err = state.forSeriesMatching(stream.Context(), matchers, func(ctx context.Context, _ model.Fingerprint, series *memorySeries) error { + chunks := make([]*desc, 0, len(series.chunkDescs)) + for _, chunk := range series.chunkDescs { + if !(chunk.FirstTime.After(through) || chunk.LastTime.Before(from)) { + chunks = append(chunks, chunk.slice(from, through)) + } + } + + if len(chunks) == 0 { + return nil + } + + numSeries++ + wireChunks, err := toWireChunks(chunks, nil) + if err != nil { + return err + } + + numChunks += len(wireChunks) + batch = append(batch, client.TimeSeriesChunk{ + Labels: client.FromLabelsToLabelAdapters(series.metric), + Chunks: wireChunks, + }) + + return nil + }, func(ctx context.Context) error { + if len(batch) == 0 { + return nil + } + err = client.SendQueryStream(stream, &client.QueryStreamResponse{ + Chunkseries: batch, + }) + batch = batch[:0] + return err + }, queryStreamBatchSize) + if err != nil { + return err + } + + i.metrics.queriedSeries.Observe(float64(numSeries)) + i.metrics.queriedChunks.Observe(float64(numChunks)) + level.Debug(log).Log("streams", numSeries) + level.Debug(log).Log("chunks", numChunks) + return err +} + +// LabelValues returns all label values that are associated with a given label name. +func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2LabelValues(ctx, req) + } + + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + state, ok, err := i.userStates.getViaContext(ctx) + if err != nil { + return nil, err + } else if !ok { + return &client.LabelValuesResponse{}, nil + } + + resp := &client.LabelValuesResponse{} + resp.LabelValues = append(resp.LabelValues, state.index.LabelValues(req.LabelName)...) + + return resp, nil +} + +// LabelNames return all the label names. +func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2LabelNames(ctx, req) + } + + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + state, ok, err := i.userStates.getViaContext(ctx) + if err != nil { + return nil, err + } else if !ok { + return &client.LabelNamesResponse{}, nil + } + + resp := &client.LabelNamesResponse{} + resp.LabelNames = append(resp.LabelNames, state.index.LabelNames()...) + + return resp, nil +} + +// MetricsForLabelMatchers returns all the metrics which match a set of matchers. 
+func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.MetricsForLabelMatchersRequest) (*client.MetricsForLabelMatchersResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2MetricsForLabelMatchers(ctx, req) + } + + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + state, ok, err := i.userStates.getViaContext(ctx) + if err != nil { + return nil, err + } else if !ok { + return &client.MetricsForLabelMatchersResponse{}, nil + } + + // TODO Right now we ignore start and end. + _, _, matchersSet, err := client.FromMetricsForLabelMatchersRequest(req) + if err != nil { + return nil, err + } + + lss := map[model.Fingerprint]labels.Labels{} + for _, matchers := range matchersSet { + if err := state.forSeriesMatching(ctx, matchers, func(ctx context.Context, fp model.Fingerprint, series *memorySeries) error { + if _, ok := lss[fp]; !ok { + lss[fp] = series.metric + } + return nil + }, nil, 0); err != nil { + return nil, err + } + } + + result := &client.MetricsForLabelMatchersResponse{ + Metric: make([]*client.Metric, 0, len(lss)), + } + for _, ls := range lss { + result.Metric = append(result.Metric, &client.Metric{Labels: client.FromLabelsToLabelAdapters(ls)}) + } + + return result, nil +} + +// UserStats returns ingestion statistics for the current user. +func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2UserStats(ctx, req) + } + + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + state, ok, err := i.userStates.getViaContext(ctx) + if err != nil { + return nil, err + } else if !ok { + return &client.UserStatsResponse{}, nil + } + + apiRate := state.ingestedAPISamples.rate() + ruleRate := state.ingestedRuleSamples.rate() + return &client.UserStatsResponse{ + IngestionRate: apiRate + ruleRate, + ApiIngestionRate: apiRate, + RuleIngestionRate: ruleRate, + NumSeries: uint64(state.fpToSeries.length()), + }, nil +} + +// AllUserStats returns ingestion statistics for all users known to this ingester. +func (i *Ingester) AllUserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UsersStatsResponse, error) { + if err := i.checkRunningOrStopping(); err != nil { + return nil, err + } + + if i.cfg.TSDBEnabled { + return i.v2AllUserStats(ctx, req) + } + + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + users := i.userStates.cp() + + response := &client.UsersStatsResponse{ + Stats: make([]*client.UserIDStatsResponse, 0, len(users)), + } + for userID, state := range users { + apiRate := state.ingestedAPISamples.rate() + ruleRate := state.ingestedRuleSamples.rate() + response.Stats = append(response.Stats, &client.UserIDStatsResponse{ + UserId: userID, + Data: &client.UserStatsResponse{ + IngestionRate: apiRate + ruleRate, + ApiIngestionRate: apiRate, + RuleIngestionRate: ruleRate, + NumSeries: uint64(state.fpToSeries.length()), + }, + }) + } + return response, nil +} + +// CheckReady is the readiness handler used to indicate to k8s when the ingesters +// are ready for the addition or removal of another ingester. 
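CheckReady, defined next, is meant to back a Kubernetes readiness probe. A hedged sketch of an HTTP handler built on top of it; the interface and route here are assumptions for illustration:

```go
package main

import (
	"context"
	"net/http"
)

// readyChecker is the one method this handler needs from the Ingester.
type readyChecker interface {
	CheckReady(ctx context.Context) error
}

// readinessHandler adapts CheckReady to an HTTP endpoint; Kubernetes marks
// the pod unready on any non-2xx response.
func readinessHandler(c readyChecker) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := c.CheckReady(r.Context()); err != nil {
			http.Error(w, "not ready: "+err.Error(), http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	}
}

func main() {
	// In real wiring, `ing` would be the *Ingester built by New or NewV2:
	// http.Handle("/ready", readinessHandler(ing))
}
```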
+func (i *Ingester) CheckReady(ctx context.Context) error {
+	if err := i.checkRunningOrStopping(); err != nil {
+		return fmt.Errorf("ingester not ready: %v", err)
+	}
+	return i.lifecycler.CheckReady(ctx)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
new file mode 100644
index 000000000000..b5bec043da84
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
@@ -0,0 +1,1041 @@
+package ingester
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/gate"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/tsdb"
+	"github.com/thanos-io/thanos/pkg/block/metadata"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/shipper"
+	"github.com/weaveworks/common/httpgrpc"
+	"github.com/weaveworks/common/user"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/ring"
+	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/cortexproject/cortex/pkg/util/spanlogger"
+	"github.com/cortexproject/cortex/pkg/util/validation"
+)
+
+const (
+	errTSDBCreateIncompatibleState = "cannot create a new TSDB while the ingester is not in active state (current state: %s)"
+)
+
+// Shipper interface is used to have an easy way to mock it in tests.
+type Shipper interface {
+	Sync(ctx context.Context) (uploaded int, err error)
+}
+
+type userTSDB struct {
+	*tsdb.DB
+	refCache *cortex_tsdb.RefCache
+
+	// Thanos shipper used to ship blocks to the storage.
+	shipper       Shipper
+	shipperCtx    context.Context
+	shipperCancel context.CancelFunc
+
+	// for statistics
+	ingestedAPISamples  *ewmaRate
+	ingestedRuleSamples *ewmaRate
+}
+
+// TSDBState holds data structures used by the TSDB storage engine
+type TSDBState struct {
+	dbs    map[string]*userTSDB // tsdb sharded by userID
+	bucket objstore.Bucket
+
+	// Keeps count of in-flight requests
+	inflightWriteReqs sync.WaitGroup
+
+	// Used to run shutdown operations only once, during the blocks/WAL
+	// transfer to a joining ingester
+	transferOnce sync.Once
+
+	subservices *services.Manager
+
+	tsdbMetrics *tsdbMetrics
+
+	// Head compactions metrics.
+	compactionsTriggered prometheus.Counter
+	compactionsFailed    prometheus.Counter
+	walReplayTime        prometheus.Histogram
+}
+
+// NewV2 returns a new Ingester that uses prometheus block storage instead of chunk storage
+func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) {
+	util.WarnExperimentalUse("Blocks storage engine")
+	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "cortex", util.Logger)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create the bucket client")
+	}
+
+	if registerer != nil {
+		bucketClient = objstore.BucketWithMetrics( /* bucket label value */ "", bucketClient, prometheus.WrapRegistererWithPrefix("cortex_ingester_", registerer))
+	}
+
+	i := &Ingester{
+		cfg:          cfg,
+		clientConfig: clientConfig,
+		metrics:      newIngesterMetrics(registerer, false),
+		limits:       limits,
+		chunkStore:   nil,
+		wal:          &noopWAL{},
+		TSDBState: TSDBState{
+			dbs:         make(map[string]*userTSDB),
+			bucket:      bucketClient,
+			tsdbMetrics: newTSDBMetrics(registerer),
+
+			compactionsTriggered: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+				Name: "cortex_ingester_tsdb_compactions_triggered_total",
+				Help: "Total number of triggered compactions.",
+			}),
+
+			compactionsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+				Name: "cortex_ingester_tsdb_compactions_failed_total",
+				Help: "Total number of compactions that failed.",
+			}),
+			walReplayTime: promauto.With(registerer).NewHistogram(prometheus.HistogramOpts{
+				Name:    "cortex_ingester_tsdb_wal_replay_duration_seconds",
+				Help:    "The total time it takes to open and replay a TSDB WAL.",
+				Buckets: prometheus.DefBuckets,
+			}),
+		},
+	}
+
+	// Replace specific metrics which we can't directly track, but need to read
+	// from the underlying system (i.e. TSDB).
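The replacement described by the comment above follows in the next hunk: a plain gauge is swapped for a GaugeFunc whose value is read live at scrape time. As a standalone illustration of that client_golang pattern (metric name and source value invented):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	inMemorySeries := func() float64 {
		return 42 // stand-in for summing Head().NumSeries() across TSDBs
	}

	// The callback runs at scrape time, so the gauge always reflects the
	// underlying system rather than a counter we maintain by hand.
	promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{
		Name: "example_memory_series", // hypothetical name
		Help: "The current number of series in memory.",
	}, inMemorySeries)

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetGauge().GetValue())
	}
}
```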
+	if registerer != nil {
+		registerer.Unregister(i.metrics.memSeries)
+		promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{
+			Name: "cortex_ingester_memory_series",
+			Help: "The current number of series in memory.",
+		}, i.numSeriesInTSDB)
+	}
+
+	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, true)
+	if err != nil {
+		return nil, err
+	}
+	i.subservicesWatcher = services.NewFailureWatcher()
+	i.subservicesWatcher.WatchService(i.lifecycler)
+
+	// Init the limiter and instantiate the user states which depend on it
+	i.limiter = NewLimiter(limits, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor, cfg.ShardByAllLabels)
+	i.userStates = newUserStates(i.limiter, cfg, i.metrics)
+
+	i.Service = services.NewBasicService(i.startingV2, i.updateLoop, i.stoppingV2)
+	return i, nil
+}
+
+func (i *Ingester) startingV2(ctx context.Context) error {
+	// Scan and open TSDBs that already exist on disk
+	if err := i.openExistingTSDB(context.Background()); err != nil {
+		return errors.Wrap(err, "opening existing TSDBs")
+	}
+
+	// Important: we want to keep the lifecycler running until we ask it to stop, so we need to give it an independent context
+	if err := i.lifecycler.StartAsync(context.Background()); err != nil {
+		return errors.Wrap(err, "failed to start lifecycler")
+	}
+	if err := i.lifecycler.AwaitRunning(ctx); err != nil {
+		return errors.Wrap(err, "failed to start lifecycler")
+	}
+
+	// let's start the rest of the subservices via the manager
+	servs := []services.Service(nil)
+
+	compactionService := services.NewBasicService(nil, i.compactionLoop, nil)
+	servs = append(servs, compactionService)
+
+	if i.cfg.TSDBConfig.ShipInterval > 0 {
+		shippingService := services.NewBasicService(nil, i.shipBlocksLoop, nil)
+		servs = append(servs, shippingService)
+	}
+
+	var err error
+	i.TSDBState.subservices, err = services.NewManager(servs...)
+	if err == nil {
+		err = services.StartManagerAndAwaitHealthy(ctx, i.TSDBState.subservices)
+	}
+	return errors.Wrap(err, "failed to start ingester components")
+}
+
+// runs when the V2 ingester is stopping
+func (i *Ingester) stoppingV2(_ error) error {
+	// It's important to wait until the shipper is finished,
+	// because the blocks transfer should start only once it's guaranteed
+	// there's no shipping on-going.
+
+	if err := services.StopManagerAndAwaitStopped(context.Background(), i.TSDBState.subservices); err != nil {
+		level.Warn(util.Logger).Log("msg", "stopping ingester subservices", "err", err)
+	}
+
+	// Next initiate our graceful exit from the ring.
+	return services.StopAndAwaitTerminated(context.Background(), i.lifecycler)
+}
+
+func (i *Ingester) updateLoop(ctx context.Context) error {
+	rateUpdateTicker := time.NewTicker(i.cfg.RateUpdatePeriod)
+	defer rateUpdateTicker.Stop()
+
+	// We use a hardcoded value for this ticker because there should be no
+	// real value in customizing it.
+	refCachePurgeTicker := time.NewTicker(5 * time.Minute)
+	defer refCachePurgeTicker.Stop()
+
+	for {
+		select {
+		case <-rateUpdateTicker.C:
+			i.userStatesMtx.RLock()
+			for _, db := range i.TSDBState.dbs {
+				db.ingestedAPISamples.tick()
+				db.ingestedRuleSamples.tick()
+			}
+			i.userStatesMtx.RUnlock()
+		case <-refCachePurgeTicker.C:
+			for _, userID := range i.getTSDBUsers() {
+				userDB := i.getTSDB(userID)
+				if userDB != nil {
+					userDB.refCache.Purge(time.Now().Add(-cortex_tsdb.DefaultRefCacheTTL))
+				}
+			}
+		case <-ctx.Done():
+			return nil
+		case err := <-i.subservicesWatcher.Chan():
+			return errors.Wrap(err, "ingester subservice failed")
+		}
+	}
+}
+
+// v2Push adds metrics to a block
+func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) {
+	var firstPartialErr error
+
+	// NOTE: because we use `unsafe` in deserialisation, we must not
+	// retain anything from `req` past the call to ReuseSlice
+	defer client.ReuseSlice(req.Timeseries)
+
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("no user id")
+	}
+
+	db, err := i.getOrCreateTSDB(userID, false)
+	if err != nil {
+		return nil, wrapWithUser(err, userID)
+	}
+
+	// Ensure the ingester shutdown procedure hasn't started
+	i.userStatesMtx.RLock()
+
+	if i.stopped {
+		i.userStatesMtx.RUnlock()
+		return nil, fmt.Errorf("ingester stopping")
+	}
+
+	// Keep track of in-flight requests, in order to safely start blocks transfer
+	// (at shutdown) only once all in-flight write requests have completed.
+	// It's important to increase the number of in-flight requests within the lock
+	// (even if sync.WaitGroup is thread-safe), otherwise there's a race condition
+	// with the TSDB transfer, which - after the stopped flag is set to true - waits
+	// until all in-flight requests reach zero.
+	i.TSDBState.inflightWriteReqs.Add(1)
+	i.userStatesMtx.RUnlock()
+	defer i.TSDBState.inflightWriteReqs.Done()
+
+	// Keep track of some stats which are tracked only if the samples will be
+	// successfully committed
+	succeededSamplesCount := 0
+	failedSamplesCount := 0
+	now := time.Now()
+
+	// Walk the samples, appending them to the user's database
+	app := db.Appender()
+	for _, ts := range req.Timeseries {
+		// Check if we already have a cached reference for this series. Be aware
+		// that even if we have a reference it's not guaranteed to be still valid.
+		// The labels must be sorted (in our case, it's guaranteed a write request
+		// has sorted labels once it hits the ingester).
+		cachedRef, cachedRefExists := db.refCache.Ref(now, client.FromLabelAdaptersToLabels(ts.Labels))
+
+		for _, s := range ts.Samples {
+			var err error
+
+			// If the cached reference exists, we try to use it.
+			if cachedRefExists {
+				if err = app.AddFast(cachedRef, s.TimestampMs, s.Value); err == nil {
+					succeededSamplesCount++
+					continue
+				}
+
+				if errors.Cause(err) == tsdb.ErrNotFound {
+					cachedRefExists = false
+					err = nil
+				}
+			}
+
+			// If the cached reference doesn't exist, we (re)try without using the reference.
+			if !cachedRefExists {
+				var ref uint64
+
+				// Copy the label set because both TSDB and the cache may retain it.
+				copiedLabels := client.FromLabelAdaptersToLabelsWithCopy(ts.Labels)
+
+				if ref, err = app.Add(copiedLabels, s.TimestampMs, s.Value); err == nil {
+					db.refCache.SetRef(now, copiedLabels, ref)
+					cachedRef = ref
+					cachedRefExists = true
+
+					succeededSamplesCount++
+					continue
+				}
+			}
+
+			failedSamplesCount++
+
+			// Check if the error is a soft error we can proceed on. If so, we keep track
+			// of it, so that we can return it back to the distributor, which will return a
+			// 400 error to the client. The client (Prometheus) will not retry on 400, and
+			// we actually ingested all samples which haven't failed.
+			cause := errors.Cause(err)
+			if cause == tsdb.ErrOutOfBounds || cause == tsdb.ErrOutOfOrderSample || cause == tsdb.ErrAmendSample {
+				if firstPartialErr == nil {
+					firstPartialErr = errors.Wrapf(err, "series=%s, timestamp=%v", client.FromLabelAdaptersToLabels(ts.Labels).String(), model.Time(s.TimestampMs).Time().Format(time.RFC3339Nano))
+				}
+
+				switch cause {
+				case tsdb.ErrOutOfBounds:
+					validation.DiscardedSamples.WithLabelValues(sampleOutOfBounds, userID).Inc()
+				case tsdb.ErrOutOfOrderSample:
+					validation.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Inc()
+				case tsdb.ErrAmendSample:
+					validation.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Inc()
+				}
+
+				continue
+			}
+
+			// The error looks like an issue on our side, so we should rollback
+			if rollbackErr := app.Rollback(); rollbackErr != nil {
+				level.Warn(util.Logger).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr)
+			}
+
+			return nil, wrapWithUser(err, userID)
+		}
+	}
+	if err := app.Commit(); err != nil {
+		return nil, wrapWithUser(err, userID)
+	}
+
+	// Increment metrics only if the samples have been successfully committed.
+	// If the code didn't reach this point, it means that we returned an error
+	// which will be converted into an HTTP 5xx and the client should/will retry.
+	i.metrics.ingestedSamples.Add(float64(succeededSamplesCount))
+	i.metrics.ingestedSamplesFail.Add(float64(failedSamplesCount))
+
+	switch req.Source {
+	case client.RULE:
+		db.ingestedRuleSamples.add(int64(succeededSamplesCount))
+	case client.API:
+		fallthrough
+	default:
+		db.ingestedAPISamples.add(int64(succeededSamplesCount))
+	}
+
+	if firstPartialErr != nil {
+		return &client.WriteResponse{}, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(firstPartialErr, userID).Error())
+	}
+	return &client.WriteResponse{}, nil
+}
+
+func (i *Ingester) v2Query(ctx context.Context, req *client.QueryRequest) (*client.QueryResponse, error) {
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	from, through, matchers, err := client.FromQueryRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	i.metrics.queries.Inc()
+
+	db := i.getTSDB(userID)
+	if db == nil {
+		return &client.QueryResponse{}, nil
+	}
+
+	q, err := db.Querier(int64(from), int64(through))
+	if err != nil {
+		return nil, err
+	}
+	defer q.Close()
+
+	ss, err := q.Select(matchers...)
+	if err != nil {
+		return nil, err
+	}
+
+	numSamples := 0
+
+	result := &client.QueryResponse{}
+	for ss.Next() {
+		series := ss.At()
+
+		ts := client.TimeSeries{
+			Labels: client.FromLabelsToLabelAdapters(series.Labels()),
+		}
+
+		it := series.Iterator()
+		for it.Next() {
+			t, v := it.At()
+			ts.Samples = append(ts.Samples, client.Sample{Value: v, TimestampMs: t})
+		}
+
+		numSamples += len(ts.Samples)
+		result.Timeseries = append(result.Timeseries, ts)
+	}
+
+	i.metrics.queriedSeries.Observe(float64(len(result.Timeseries)))
+	i.metrics.queriedSamples.Observe(float64(numSamples))
+
+	return result, ss.Err()
+}
+
+func (i *Ingester) v2LabelValues(ctx context.Context, req *client.LabelValuesRequest) (*client.LabelValuesResponse, error) {
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	db := i.getTSDB(userID)
+	if db == nil {
+		return &client.LabelValuesResponse{}, nil
+	}
+
+	// Since the ingester runs with a very limited TSDB retention, we can (and should) query
+	// label values without any time range bound.
+	q, err := db.Querier(0, math.MaxInt64)
+	if err != nil {
+		return nil, err
+	}
+	defer q.Close()
+
+	vals, err := q.LabelValues(req.LabelName)
+	if err != nil {
+		return nil, err
+	}
+
+	return &client.LabelValuesResponse{
+		LabelValues: vals,
+	}, nil
+}
+
+func (i *Ingester) v2LabelNames(ctx context.Context, req *client.LabelNamesRequest) (*client.LabelNamesResponse, error) {
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	db := i.getTSDB(userID)
+	if db == nil {
+		return &client.LabelNamesResponse{}, nil
+	}
+
+	// Since the ingester runs with a very limited TSDB retention, we can (and should) query
+	// label names without any time range bound.
+	q, err := db.Querier(0, math.MaxInt64)
+	if err != nil {
+		return nil, err
+	}
+	defer q.Close()
+
+	names, err := q.LabelNames()
+	if err != nil {
+		return nil, err
+	}
+
+	return &client.LabelNamesResponse{
+		LabelNames: names,
+	}, nil
+}
+
+func (i *Ingester) v2MetricsForLabelMatchers(ctx context.Context, req *client.MetricsForLabelMatchersRequest) (*client.MetricsForLabelMatchersResponse, error) {
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	db := i.getTSDB(userID)
+	if db == nil {
+		return &client.MetricsForLabelMatchersResponse{}, nil
+	}
+
+	// Parse the request
+	from, to, matchersSet, err := client.FromMetricsForLabelMatchersRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a new instance of the TSDB querier
+	q, err := db.Querier(int64(from), int64(to))
+	if err != nil {
+		return nil, err
+	}
+	defer q.Close()
+
+	// Run a query for each matchers set and collect all the results
+	added := map[string]struct{}{}
+	result := &client.MetricsForLabelMatchersResponse{
+		Metric: make([]*client.Metric, 0),
+	}
+
+	for _, matchers := range matchersSet {
+		seriesSet, err := q.Select(matchers...)
+ if err != nil { + return nil, err + } + + for seriesSet.Next() { + if seriesSet.Err() != nil { + break + } + + // Given the same series can be matched by multiple matchers and we want to + // return the unique set of matching series, we do check if the series has + // already been added to the result + ls := seriesSet.At().Labels() + key := ls.String() + if _, ok := added[key]; ok { + continue + } + + result.Metric = append(result.Metric, &client.Metric{ + Labels: client.FromLabelsToLabelAdapters(ls), + }) + + added[key] = struct{}{} + } + + // In case of any error while iterating the series, we break + // the execution and return it + if err := seriesSet.Err(); err != nil { + return nil, err + } + } + + return result, nil +} + +func (i *Ingester) v2UserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UserStatsResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + + db := i.getTSDB(userID) + if db == nil { + return &client.UserStatsResponse{}, nil + } + + return createUserStats(db), nil +} + +func (i *Ingester) v2AllUserStats(ctx context.Context, req *client.UserStatsRequest) (*client.UsersStatsResponse, error) { + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + + users := i.TSDBState.dbs + + response := &client.UsersStatsResponse{ + Stats: make([]*client.UserIDStatsResponse, 0, len(users)), + } + for userID, db := range users { + response.Stats = append(response.Stats, &client.UserIDStatsResponse{ + UserId: userID, + Data: createUserStats(db), + }) + } + return response, nil +} + +func createUserStats(db *userTSDB) *client.UserStatsResponse { + apiRate := db.ingestedAPISamples.rate() + ruleRate := db.ingestedRuleSamples.rate() + return &client.UserStatsResponse{ + IngestionRate: apiRate + ruleRate, + ApiIngestionRate: apiRate, + RuleIngestionRate: ruleRate, + NumSeries: db.Head().NumSeries(), + } +} + +// v2QueryStream streams metrics from a TSDB. This implements the client.IngesterServer interface +func (i *Ingester) v2QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) error { + _, ctx := spanlogger.New(stream.Context(), "v2QueryStream") + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return err + } + + from, through, matchers, err := client.FromQueryRequest(req) + if err != nil { + return err + } + + i.metrics.queries.Inc() + + db := i.getTSDB(userID) + if db == nil { + return nil + } + + q, err := db.Querier(int64(from), int64(through)) + if err != nil { + return err + } + defer q.Close() + + ss, err := q.Select(matchers...) + if err != nil { + return err + } + + timeseries := make([]client.TimeSeries, 0, queryStreamBatchSize) + batchSize := 0 + numSamples := 0 + numSeries := 0 + for ss.Next() { + series := ss.At() + + // convert labels to LabelAdapter + ts := client.TimeSeries{ + Labels: client.FromLabelsToLabelAdapters(series.Labels()), + } + + it := series.Iterator() + for it.Next() { + t, v := it.At() + ts.Samples = append(ts.Samples, client.Sample{Value: v, TimestampMs: t}) + } + numSamples += len(ts.Samples) + + timeseries = append(timeseries, ts) + numSeries++ + batchSize++ + if batchSize >= queryStreamBatchSize { + err = client.SendQueryStream(stream, &client.QueryStreamResponse{ + Timeseries: timeseries, + }) + if err != nil { + return err + } + + batchSize = 0 + timeseries = timeseries[:0] + } + } + + // Ensure no error occurred while iterating the series set. 
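(The error check announced by the comment above follows right after this aside.) A note on the batching scheme v2QueryStream uses above: series accumulate until queryStreamBatchSize, each full batch is sent, and the slice's backing array is reused. A simplified, self-contained sketch of the same accumulate-and-flush loop:

```go
package main

import "fmt"

// send stands in for client.SendQueryStream.
func send(batch []string) error {
	fmt.Println("sending", len(batch), "series")
	return nil
}

func main() {
	const batchSize = 3 // the real code uses queryStreamBatchSize = 128
	series := []string{"a", "b", "c", "d", "e"}

	batch := make([]string, 0, batchSize)
	for _, s := range series {
		batch = append(batch, s)
		if len(batch) >= batchSize {
			if err := send(batch); err != nil {
				return
			}
			batch = batch[:0] // reuse the backing array for the next batch
		}
	}
	// Final flush for the partial batch left over after the loop.
	if len(batch) > 0 {
		_ = send(batch)
	}
}
```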
+	if err := ss.Err(); err != nil {
+		return err
+	}
+
+	// Finally, flush any series left in the last partial batch
+	if batchSize != 0 {
+		err = client.SendQueryStream(stream, &client.QueryStreamResponse{
+			Timeseries: timeseries,
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	i.metrics.queriedSeries.Observe(float64(numSeries))
+	i.metrics.queriedSamples.Observe(float64(numSamples))
+	return nil
+}
+
+func (i *Ingester) getTSDB(userID string) *userTSDB {
+	i.userStatesMtx.RLock()
+	defer i.userStatesMtx.RUnlock()
+	db := i.TSDBState.dbs[userID]
+	return db
+}
+
+// List all users for which we have a TSDB. We do it here in order
+// to keep the mutex locked for the shortest time possible.
+func (i *Ingester) getTSDBUsers() []string {
+	i.userStatesMtx.RLock()
+	defer i.userStatesMtx.RUnlock()
+
+	ids := make([]string, 0, len(i.TSDBState.dbs))
+	for userID := range i.TSDBState.dbs {
+		ids = append(ids, userID)
+	}
+
+	return ids
+}
+
+func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error) {
+	db := i.getTSDB(userID)
+	if db != nil {
+		return db, nil
+	}
+
+	i.userStatesMtx.Lock()
+	defer i.userStatesMtx.Unlock()
+
+	// Check again for the DB in the event it was created in-between locks
+	var ok bool
+	db, ok = i.TSDBState.dbs[userID]
+	if ok {
+		return db, nil
+	}
+
+	// We're ready to create the TSDB, however we must be sure that the ingester
+	// is in the ACTIVE state, otherwise it may conflict with the transfer in/out.
+	// The TSDB is created when the first series is pushed and this shouldn't happen
+	// to a non-ACTIVE ingester, however we want to protect against any bug, because
+	// we may have data loss or TSDB WAL corruption if the TSDB is created before/during
+	// a transfer in occurs.
+	if ingesterState := i.lifecycler.GetState(); !force && ingesterState != ring.ACTIVE {
+		return nil, fmt.Errorf(errTSDBCreateIncompatibleState, ingesterState)
+	}
+
+	// Create the database and a shipper for a user
+	db, err := i.createTSDB(userID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Add the db to the list of user databases
+	i.TSDBState.dbs[userID] = db
+	i.metrics.memUsers.Inc()
+
+	return db, nil
+}
+
+// createTSDB creates a TSDB for a given userID, and returns the created db.
+func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
+	tsdbPromReg := prometheus.NewRegistry()
+	udir := i.cfg.TSDBConfig.BlocksDir(userID)
+	userLogger := util.WithUserID(userID, util.Logger)
+
+	// Create a new user database
+	db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{
+		RetentionDuration: uint64(i.cfg.TSDBConfig.Retention / time.Millisecond),
+		BlockRanges:       i.cfg.TSDBConfig.BlockRanges.ToMilliseconds(),
+		NoLockfile:        true,
+		StripeSize:        i.cfg.TSDBConfig.StripeSize,
+	})
+	if err != nil {
+		return nil, err
+	}
+	db.DisableCompactions() // we will compact on our own schedule
+
+	userDB := &userTSDB{
+		DB:                  db,
+		refCache:            cortex_tsdb.NewRefCache(),
+		ingestedAPISamples:  newEWMARate(0.2, i.cfg.RateUpdatePeriod),
+		ingestedRuleSamples: newEWMARate(0.2, i.cfg.RateUpdatePeriod),
+	}
+
+	// The Thanos shipper requires at least 1 external label to be set. For this reason,
+	// we set the tenant ID as an external label and we'll filter it out when reading
+	// the series from the storage.
+	l := labels.Labels{
+		{
+			Name:  cortex_tsdb.TenantIDExternalLabel,
+			Value: userID,
+		},
+	}
+
+	// Create a new shipper for this database
+	if i.cfg.TSDBConfig.ShipInterval > 0 {
+		userDB.shipper = shipper.New(
+			userLogger,
+			tsdbPromReg,
+			udir,
+			cortex_tsdb.NewUserBucketClient(userID, i.TSDBState.bucket),
+			func() labels.Labels { return l }, metadata.ReceiveSource)
+
+		userDB.shipperCtx, userDB.shipperCancel = context.WithCancel(context.Background())
+	}
+
+	i.TSDBState.tsdbMetrics.setRegistryForUser(userID, tsdbPromReg)
+	return userDB, nil
+}
+
+func (i *Ingester) closeAllTSDB() {
+	i.userStatesMtx.Lock()
+
+	wg := &sync.WaitGroup{}
+	wg.Add(len(i.TSDBState.dbs))
+
+	// Concurrently close all per-user TSDBs
+	for userID, userDB := range i.TSDBState.dbs {
+		userID := userID
+
+		go func(db *userTSDB) {
+			defer wg.Done()
+
+			if err := db.Close(); err != nil {
+				level.Warn(util.Logger).Log("msg", "unable to close TSDB", "err", err, "user", userID)
+				return
+			}
+
+			// Now that the TSDB has been closed, we should remove it from the
+			// set of open ones. This lock acquisition doesn't deadlock with the
+			// outer one, because the outer one is released as soon as all
+			// goroutines are started.
+			i.userStatesMtx.Lock()
+			delete(i.TSDBState.dbs, userID)
+			i.userStatesMtx.Unlock()
+		}(userDB)
+	}
+
+	// Wait until all Close() calls have completed
+	i.userStatesMtx.Unlock()
+	wg.Wait()
+}
+
+// openExistingTSDB walks the user tsdb dir and opens a tsdb for each user. This may start a WAL replay, so we limit the number of
+// concurrently opening TSDBs.
+func (i *Ingester) openExistingTSDB(ctx context.Context) error {
+	level.Info(util.Logger).Log("msg", "opening existing TSDBs")
+	wg := &sync.WaitGroup{}
+	openGate := gate.New(i.cfg.TSDBConfig.MaxTSDBOpeningConcurrencyOnStartup)
+
+	err := filepath.Walk(i.cfg.TSDBConfig.Dir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return filepath.SkipDir
+		}
+
+		// Skip root dir and all other files
+		if path == i.cfg.TSDBConfig.Dir || !info.IsDir() {
+			return nil
+		}
+
+		// Top level directories are assumed to be user TSDBs
+		userID := info.Name()
+		f, err := os.Open(path)
+		if err != nil {
+			level.Error(util.Logger).Log("msg", "unable to open user TSDB dir", "err", err, "user", userID, "path", path)
+			return filepath.SkipDir
+		}
+		defer f.Close()
+
+		// If the dir is empty, skip it
+		if _, err := f.Readdirnames(1); err != nil {
+			if err != io.EOF {
+				level.Error(util.Logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path)
+			}
+
+			return filepath.SkipDir
+		}
+
+		// Limit the number of TSDBs opening concurrently. Start blocks until there's a free spot available or the context is cancelled.
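closeAllTSDB above relies on a specific lock choreography: the outer lock is held only while the goroutines are spawned, then released before waiting, so each goroutine can briefly re-acquire it to delete its entry. A condensed sketch of that pattern (hypothetical registry type; plain Close functions stand in for *userTSDB):

package main

import "sync"

type registry struct {
	mtx  sync.Mutex
	open map[string]func() error // userID -> Close func, stand-in for *userTSDB
}

func (r *registry) closeAll() {
	r.mtx.Lock()

	wg := &sync.WaitGroup{}
	wg.Add(len(r.open))

	for id, closeFn := range r.open {
		id, closeFn := id, closeFn // capture loop vars (pre-Go 1.22 semantics)

		go func() {
			defer wg.Done()
			if err := closeFn(); err != nil {
				return // keep a failed close in the map, as above
			}
			// No deadlock: the outer lock is released right after
			// all goroutines have been started.
			r.mtx.Lock()
			delete(r.open, id)
			r.mtx.Unlock()
		}()
	}

	r.mtx.Unlock()
	wg.Wait()
}

func main() {
	r := &registry{open: map[string]func() error{"a": func() error { return nil }}}
	r.closeAll()
}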
+		if err := openGate.Start(ctx); err != nil {
+			return err
+		}
+
+		wg.Add(1)
+		go func(userID string) {
+			defer wg.Done()
+			defer openGate.Done()
+			defer func(ts time.Time) {
+				i.TSDBState.walReplayTime.Observe(time.Since(ts).Seconds())
+			}(time.Now())
+
+			db, err := i.createTSDB(userID)
+			if err != nil {
+				level.Error(util.Logger).Log("msg", "unable to open user TSDB", "err", err, "user", userID)
+				return
+			}
+
+			// Add the database to the map of user databases
+			i.userStatesMtx.Lock()
+			i.TSDBState.dbs[userID] = db
+			i.userStatesMtx.Unlock()
+			i.metrics.memUsers.Inc()
+		}(userID)
+
+		return filepath.SkipDir // Don't descend into directories
+	})
+
+	// Wait for all opening routines to finish
+	wg.Wait()
+	if err != nil {
+		level.Error(util.Logger).Log("msg", "error while opening existing TSDBs", "err", err)
+	} else {
+		level.Info(util.Logger).Log("msg", "successfully opened existing TSDBs")
+	}
+	return err
+}
+
+// numSeriesInTSDB returns the total number of in-memory series across all open TSDBs.
+func (i *Ingester) numSeriesInTSDB() float64 {
+	i.userStatesMtx.RLock()
+	defer i.userStatesMtx.RUnlock()
+
+	count := uint64(0)
+	for _, db := range i.TSDBState.dbs {
+		count += db.Head().NumSeries()
+	}
+
+	return float64(count)
+}
+
+func (i *Ingester) shipBlocksLoop(ctx context.Context) error {
+	// Start a goroutine that will cancel all shipper contexts on ingester
+	// shutdown, so that if there's any shipper sync in progress it will be
+	// quickly canceled.
+	// TODO: this could be a "stoppingFn" for shipper service, but let's keep that for later.
+	go func() {
+		<-ctx.Done()
+
+		for _, userID := range i.getTSDBUsers() {
+			if userDB := i.getTSDB(userID); userDB != nil && userDB.shipperCancel != nil {
+				userDB.shipperCancel()
+			}
+		}
+	}()
+
+	shipTicker := time.NewTicker(i.cfg.TSDBConfig.ShipInterval)
+	defer shipTicker.Stop()
+
+	for {
+		select {
+		case <-shipTicker.C:
+			i.shipBlocks(ctx)
+
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
+
+func (i *Ingester) shipBlocks(ctx context.Context) {
+	// Do not ship blocks if the ingester is PENDING or JOINING. It's
+	// particularly important for the JOINING state because there could
+	// be a blocks transfer in progress (from another ingester) and if we
+	// run the shipper in such a state we could end up with race conditions.
+	if ingesterState := i.lifecycler.GetState(); ingesterState == ring.PENDING || ingesterState == ring.JOINING {
+		level.Info(util.Logger).Log("msg", "TSDB blocks shipping has been skipped because of the current ingester state", "state", ingesterState)
+		return
+	}
+
+	// The number of concurrent workers is limited in order to avoid concurrently
+	// syncing a large number of tenants in a large cluster.
+	i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.ShipConcurrency, func(userID string) {
+		// Get the user's DB. If the user doesn't exist, we skip it.
+		userDB := i.getTSDB(userID)
+		if userDB == nil || userDB.shipper == nil {
+			return
+		}
+
+		// Skip if the shipper context has been canceled.
+		if userDB.shipperCtx.Err() != nil {
+			return
+		}
+
+		// Run the shipper's Sync() to upload unshipped blocks.
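The Thanos gate used by openExistingTSDB above acts as a counting semaphore: Start blocks until a slot frees up or the context is cancelled, and Done releases the slot. Assuming that semantic, an equivalent sketch built on a buffered channel:

package main

import (
	"context"
	"fmt"
	"sync"
)

// sem is a tiny stand-in for the gate used above: Start blocks until a
// slot is free or the context is cancelled; Done releases the slot.
type sem chan struct{}

func (s sem) Start(ctx context.Context) error {
	select {
	case s <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (s sem) Done() { <-s }

func main() {
	g := make(sem, 2) // at most 2 concurrent WAL replays
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		if err := g.Start(context.Background()); err != nil {
			break
		}
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer g.Done()
			fmt.Println("opening TSDB", n)
		}(i)
	}
	wg.Wait()
}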
+ if uploaded, err := userDB.shipper.Sync(userDB.shipperCtx); err != nil { + level.Warn(util.Logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err) + } else { + level.Debug(util.Logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded) + } + }) +} + +func (i *Ingester) compactionLoop(ctx context.Context) error { + ticker := time.NewTicker(i.cfg.TSDBConfig.HeadCompactionInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + i.compactBlocks(ctx) + + case <-ctx.Done(): + return nil + } + } +} + +func (i *Ingester) compactBlocks(ctx context.Context) { + // Don't compact TSDB blocks while JOINING or LEAVING, as there may be ongoing blocks transfers. + if ingesterState := i.lifecycler.GetState(); ingesterState == ring.JOINING || ingesterState == ring.LEAVING { + level.Info(util.Logger).Log("msg", "TSDB blocks compaction has been skipped because of the current ingester state", "state", ingesterState) + return + } + + i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.HeadCompactionConcurrency, func(userID string) { + userDB := i.getTSDB(userID) + if userDB == nil { + return + } + + i.TSDBState.compactionsTriggered.Inc() + err := userDB.Compact() + if err != nil { + i.TSDBState.compactionsFailed.Inc() + level.Warn(util.Logger).Log("msg", "TSDB blocks compaction for user has failed", "user", userID, "err", err) + } else { + level.Debug(util.Logger).Log("msg", "TSDB blocks compaction completed successfully", "user", userID) + } + }) +} + +func (i *Ingester) runConcurrentUserWorkers(ctx context.Context, concurrency int, userFunc func(userID string)) { + wg := sync.WaitGroup{} + ch := make(chan string) + + for ix := 0; ix < concurrency; ix++ { + wg.Add(1) + go func() { + defer wg.Done() + + for userID := range ch { + userFunc(userID) + } + }() + } + +sendLoop: + for _, userID := range i.getTSDBUsers() { + select { + case ch <- userID: + // ok + case <-ctx.Done(): + // don't start new tasks. + break sendLoop + } + } + + close(ch) + + // wait for ongoing workers to finish. 
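runConcurrentUserWorkers above (its closing wg.Wait() follows just below) is a fixed-size worker pool: N goroutines drain an unbuffered channel, and on context cancellation the sender simply stops handing out work while in-flight tasks drain. A standalone sketch of the same shape, standard library only:

package main

import (
	"context"
	"fmt"
	"sync"
)

// forEachUser mirrors runConcurrentUserWorkers: N workers drain an
// unbuffered channel; the sender stops early on context cancellation.
func forEachUser(ctx context.Context, users []string, concurrency int, fn func(string)) {
	wg := sync.WaitGroup{}
	ch := make(chan string)

	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for u := range ch {
				fn(u)
			}
		}()
	}

loop:
	for _, u := range users {
		select {
		case ch <- u:
		case <-ctx.Done():
			break loop // stop handing out work, let workers finish
		}
	}
	close(ch)
	wg.Wait()
}

func main() {
	forEachUser(context.Background(), []string{"a", "b", "c"}, 2, func(u string) {
		fmt.Println("processed", u)
	})
}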
+ wg.Wait() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go new file mode 100644 index 000000000000..896cbf0053ef --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/label_pairs.go @@ -0,0 +1,90 @@ +package ingester + +import ( + "sort" + "strings" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util/extract" +) + +// A series is uniquely identified by its set of label name/value +// pairs, which may arrive in any order over the wire +type labelPairs []client.LabelAdapter + +func (a labelPairs) String() string { + var b strings.Builder + + metricName, err := extract.MetricNameFromLabelAdapters(a) + numLabels := len(a) - 1 + if err != nil { + numLabels = len(a) + } + b.WriteString(metricName) + b.WriteByte('{') + count := 0 + for _, pair := range a { + if pair.Name != model.MetricNameLabel { + b.WriteString(pair.Name) + b.WriteString("=\"") + b.WriteString(pair.Value) + b.WriteByte('"') + count++ + if count < numLabels { + b.WriteByte(',') + } + } + } + b.WriteByte('}') + return b.String() +} + +// Remove any label where the value is "" - Prometheus 2+ will remove these +// before sending, but other clients such as Prometheus 1.x might send us blanks. +func (a *labelPairs) removeBlanks() { + for i := 0; i < len(*a); { + if len((*a)[i].Value) == 0 { + // Delete by swap with the value at the end of the slice + (*a)[i] = (*a)[len(*a)-1] + (*a) = (*a)[:len(*a)-1] + continue // go round and check the data that is now at position i + } + i++ + } +} + +func valueForName(s labels.Labels, name string) (string, bool) { + pos := sort.Search(len(s), func(i int) bool { return s[i].Name >= name }) + if pos == len(s) || s[pos].Name != name { + return "", false + } + return s[pos].Value, true +} + +// Check if a and b contain the same name/value pairs +func (a labelPairs) equal(b labels.Labels) bool { + if len(a) != len(b) { + return false + } + // Check as many as we can where the two sets are in the same order + i := 0 + for ; i < len(a); i++ { + if b[i].Name != string(a[i].Name) { + break + } + if b[i].Value != string(a[i].Value) { + return false + } + } + // Now check remaining values using binary search + for ; i < len(a); i++ { + v, found := valueForName(b, a[i].Name) + if !found || v != a[i].Value { + return false + } + } + return true +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go new file mode 100644 index 000000000000..f2184e2cc2ad --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go @@ -0,0 +1,147 @@ +package ingester + +import ( + "fmt" + "math" + + "github.com/cortexproject/cortex/pkg/util/validation" +) + +const ( + errMaxSeriesPerMetricLimitExceeded = "per-metric series limit (local: %d global: %d actual local: %d) exceeded" + errMaxSeriesPerUserLimitExceeded = "per-user series limit (local: %d global: %d actual local: %d) exceeded" +) + +// RingCount is the interface exposed by a ring implementation which allows +// to count members +type RingCount interface { + HealthyInstancesCount() int +} + +// Limiter implements primitives to get the maximum number of series +// an ingester can handle for a specific tenant +type Limiter struct { + limits *validation.Overrides + ring RingCount + replicationFactor int + 
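An aside on removeBlanks in label_pairs.go above: it is an order-destroying swap-delete, where the last element is moved into the hole and the slice is shortened, and the index only advances when nothing was removed so the swapped-in element gets re-checked. The same trick on a plain string slice:

package main

import "fmt"

// removeEmpty deletes empty strings in place, order not preserved,
// using the same swap-with-last trick as removeBlanks above.
func removeEmpty(a *[]string) {
	for i := 0; i < len(*a); {
		if (*a)[i] == "" {
			(*a)[i] = (*a)[len(*a)-1]
			*a = (*a)[:len(*a)-1]
			continue // re-check the element just swapped into position i
		}
		i++
	}
}

func main() {
	s := []string{"a", "", "b", ""}
	removeEmpty(&s)
	fmt.Println(s) // two elements remain, order unspecified
}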
shardByAllLabels bool
+}
+
+// NewLimiter makes a new in-memory series limiter
+func NewLimiter(limits *validation.Overrides, ring RingCount, replicationFactor int, shardByAllLabels bool) *Limiter {
+	return &Limiter{
+		limits:            limits,
+		ring:              ring,
+		replicationFactor: replicationFactor,
+		shardByAllLabels:  shardByAllLabels,
+	}
+}
+
+// AssertMaxSeriesPerMetric returns an error if the max series per metric limit
+// has been reached for the given number of series in input.
+func (l *Limiter) AssertMaxSeriesPerMetric(userID string, series int) error {
+	actualLimit := l.maxSeriesPerMetric(userID)
+	if series < actualLimit {
+		return nil
+	}
+
+	localLimit := l.limits.MaxLocalSeriesPerMetric(userID)
+	globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID)
+
+	return fmt.Errorf(errMaxSeriesPerMetricLimitExceeded, localLimit, globalLimit, actualLimit)
+}
+
+// AssertMaxSeriesPerUser returns an error if the max series per user limit
+// has been reached for the given number of series in input.
+func (l *Limiter) AssertMaxSeriesPerUser(userID string, series int) error {
+	actualLimit := l.maxSeriesPerUser(userID)
+	if series < actualLimit {
+		return nil
+	}
+
+	localLimit := l.limits.MaxLocalSeriesPerUser(userID)
+	globalLimit := l.limits.MaxGlobalSeriesPerUser(userID)
+
+	return fmt.Errorf(errMaxSeriesPerUserLimitExceeded, localLimit, globalLimit, actualLimit)
+}
+
+// MaxSeriesPerQuery returns the maximum number of series a query is allowed to hit.
+func (l *Limiter) MaxSeriesPerQuery(userID string) int {
+	return l.limits.MaxSeriesPerQuery(userID)
+}
+
+func (l *Limiter) maxSeriesPerMetric(userID string) int {
+	localLimit := l.limits.MaxLocalSeriesPerMetric(userID)
+	globalLimit := l.limits.MaxGlobalSeriesPerMetric(userID)
+
+	if globalLimit > 0 {
+		if l.shardByAllLabels {
+			// We can assume that series are evenly distributed across ingesters,
+			// so we convert the global limit into a local limit
+			localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit))
+		} else {
+			// Given a metric is always pushed to the same set of ingesters (based on
+			// the replication factor), we can configure the per-ingester local limit
+			// equal to the global limit.
+			localLimit = l.minNonZero(localLimit, globalLimit)
+		}
+	}
+
+	// If both the local and global limits are disabled, we just
+	// use the largest int32 value
+	if localLimit == 0 {
+		localLimit = math.MaxInt32
+	}
+
+	return localLimit
+}
+
+func (l *Limiter) maxSeriesPerUser(userID string) int {
+	localLimit := l.limits.MaxLocalSeriesPerUser(userID)
+
+	// The global limit is supported only when shard-by-all-labels is enabled,
+	// otherwise we wouldn't get an even split of series across ingesters and
+	// can't take a "local decision" without any centralized coordination.
+	if l.shardByAllLabels {
+		// We can assume that series are evenly distributed across ingesters,
+		// so we convert the global limit into a local limit
+		globalLimit := l.limits.MaxGlobalSeriesPerUser(userID)
+		localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(globalLimit))
+	}
+
+	// If both the local and global limits are disabled, we just
+	// use the largest int32 value
+	if localLimit == 0 {
+		localLimit = math.MaxInt32
+	}
+
+	return localLimit
+}
+
+func (l *Limiter) convertGlobalToLocalLimit(globalLimit int) int {
+	if globalLimit == 0 {
+		return 0
+	}
+
+	// Given we don't need a super accurate count (ie.
when the ingesters + // topology changes) and we prefer to always be in favor of the tenant, + // we can use a per-ingester limit equal to: + // (global limit / number of ingesters) * replication factor + numIngesters := l.ring.HealthyInstancesCount() + + // May happen because the number of ingesters is asynchronously updated. + // If happens, we just temporarily ignore the global limit. + if numIngesters > 0 { + return int((float64(globalLimit) / float64(numIngesters)) * float64(l.replicationFactor)) + } + + return 0 +} + +func (l *Limiter) minNonZero(first, second int) int { + if first == 0 || (second != 0 && first > second) { + return second + } + + return first +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go new file mode 100644 index 000000000000..3c97f38ba1b0 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/locker.go @@ -0,0 +1,58 @@ +package ingester + +import ( + "sync" + "unsafe" + + "github.com/prometheus/common/model" + + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + cacheLineSize = 64 +) + +// Avoid false sharing when using array of mutexes. +type paddedMutex struct { + sync.Mutex + //nolint:structcheck,unused + pad [cacheLineSize - unsafe.Sizeof(sync.Mutex{})]byte +} + +// fingerprintLocker allows locking individual fingerprints. To limit the number +// of mutexes needed for that, only a fixed number of mutexes are +// allocated. Fingerprints to be locked are assigned to those pre-allocated +// mutexes by their value. Collisions are not detected. If two fingerprints get +// assigned to the same mutex, only one of them can be locked at the same +// time. As long as the number of pre-allocated mutexes is much larger than the +// number of goroutines requiring a fingerprint lock concurrently, the loss in +// efficiency is small. However, a goroutine must never lock more than one +// fingerprint at the same time. (In that case a collision would try to acquire +// the same mutex twice). +type fingerprintLocker struct { + fpMtxs []paddedMutex + numFpMtxs uint32 +} + +// newFingerprintLocker returns a new fingerprintLocker ready for use. At least +// 1024 preallocated mutexes are used, even if preallocatedMutexes is lower. +func newFingerprintLocker(preallocatedMutexes int) *fingerprintLocker { + if preallocatedMutexes < 1024 { + preallocatedMutexes = 1024 + } + return &fingerprintLocker{ + make([]paddedMutex, preallocatedMutexes), + uint32(preallocatedMutexes), + } +} + +// Lock locks the given fingerprint. +func (l *fingerprintLocker) Lock(fp model.Fingerprint) { + l.fpMtxs[util.HashFP(fp)%l.numFpMtxs].Lock() +} + +// Unlock unlocks the given fingerprint. +func (l *fingerprintLocker) Unlock(fp model.Fingerprint) { + l.fpMtxs[util.HashFP(fp)%l.numFpMtxs].Unlock() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go new file mode 100644 index 000000000000..87c1f622b7d4 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go @@ -0,0 +1,154 @@ +package ingester + +import ( + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + + "github.com/go-kit/kit/log/level" + "github.com/prometheus/common/model" + + "github.com/cortexproject/cortex/pkg/util" +) + +const maxMappedFP = 1 << 20 // About 1M fingerprints reserved for mapping. 
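To make convertGlobalToLocalLimit above concrete: under the even-distribution assumption each ingester sees roughly global/N of the series, and multiplying by the replication factor accounts for every series being written to RF ingesters. A worked sketch with assumed numbers (10 healthy ingesters, replication factor 3, global limit 150000):

package main

import "fmt"

// localLimit mirrors the formula above:
// (global limit / number of healthy ingesters) * replication factor.
func localLimit(global, healthyIngesters, replicationFactor int) int {
	if global == 0 || healthyIngesters <= 0 {
		return 0 // limit disabled, or instance count not yet known
	}
	return int((float64(global) / float64(healthyIngesters)) * float64(replicationFactor))
}

func main() {
	// 150000 series / 10 ingesters * RF 3 = 45000 series per ingester.
	fmt.Println(localLimit(150000, 10, 3)) // 45000
}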
+ +var separatorString = string([]byte{model.SeparatorByte}) + +// fpMappings maps original fingerprints to a map of string representations of +// metrics to the truly unique fingerprint. +type fpMappings map[model.Fingerprint]map[string]model.Fingerprint + +// fpMapper is used to map fingerprints in order to work around fingerprint +// collisions. +type fpMapper struct { + // highestMappedFP has to be aligned for atomic operations. + highestMappedFP model.Fingerprint + + mtx sync.RWMutex // Protects mappings. + mappings fpMappings + + fpToSeries *seriesMap +} + +// newFPMapper loads the collision map from the persistence and +// returns an fpMapper ready to use. +func newFPMapper(fpToSeries *seriesMap) *fpMapper { + return &fpMapper{ + fpToSeries: fpToSeries, + mappings: map[model.Fingerprint]map[string]model.Fingerprint{}, + } +} + +// mapFP takes a raw fingerprint (as returned by Metrics.FastFingerprint) and +// returns a truly unique fingerprint. The caller must have locked the raw +// fingerprint. +// +// If an error is encountered, it is returned together with the unchanged raw +// fingerprint. +func (m *fpMapper) mapFP(fp model.Fingerprint, metric labelPairs) model.Fingerprint { + // First check if we are in the reserved FP space, in which case this is + // automatically a collision that has to be mapped. + if fp <= maxMappedFP { + return m.maybeAddMapping(fp, metric) + } + + // Then check the most likely case: This fp belongs to a series that is + // already in memory. + s, ok := m.fpToSeries.get(fp) + if ok { + // FP exists in memory, but is it for the same metric? + if metric.equal(s.metric) { + // Yup. We are done. + return fp + } + // Collision detected! + return m.maybeAddMapping(fp, metric) + } + // Metric is not in memory. Before doing the expensive archive lookup, + // check if we have a mapping for this metric in place already. + m.mtx.RLock() + mappedFPs, fpAlreadyMapped := m.mappings[fp] + m.mtx.RUnlock() + if fpAlreadyMapped { + // We indeed have mapped fp historically. + ms := metricToUniqueString(metric) + // fp is locked by the caller, so no further locking of + // 'collisions' required (it is specific to fp). + mappedFP, ok := mappedFPs[ms] + if ok { + // Historical mapping found, return the mapped FP. + return mappedFP + } + } + return fp +} + +// maybeAddMapping is only used internally. It takes a detected collision and +// adds it to the collisions map if not yet there. In any case, it returns the +// truly unique fingerprint for the colliding metric. +func (m *fpMapper) maybeAddMapping( + fp model.Fingerprint, + collidingMetric labelPairs, +) model.Fingerprint { + ms := metricToUniqueString(collidingMetric) + m.mtx.RLock() + mappedFPs, ok := m.mappings[fp] + m.mtx.RUnlock() + if ok { + // fp is locked by the caller, so no further locking required. + mappedFP, ok := mappedFPs[ms] + if ok { + return mappedFP // Existing mapping. + } + // A new mapping has to be created. + mappedFP = m.nextMappedFP() + mappedFPs[ms] = mappedFP + level.Debug(util.Logger).Log( + "msg", "fingerprint collision detected, mapping to new fingerprint", + "old_fp", fp, + "new_fp", mappedFP, + "metric", collidingMetric, + ) + return mappedFP + } + // This is the first collision for fp. 
+ mappedFP := m.nextMappedFP() + mappedFPs = map[string]model.Fingerprint{ms: mappedFP} + m.mtx.Lock() + m.mappings[fp] = mappedFPs + m.mtx.Unlock() + level.Debug(util.Logger).Log( + "msg", "fingerprint collision detected, mapping to new fingerprint", + "old_fp", fp, + "new_fp", mappedFP, + "metric", collidingMetric, + ) + return mappedFP +} + +func (m *fpMapper) nextMappedFP() model.Fingerprint { + mappedFP := model.Fingerprint(atomic.AddUint64((*uint64)(&m.highestMappedFP), 1)) + if mappedFP > maxMappedFP { + panic(fmt.Errorf("more than %v fingerprints mapped in collision detection", maxMappedFP)) + } + return mappedFP +} + +// metricToUniqueString turns a metric into a string in a reproducible and +// unique way, i.e. the same metric will always create the same string, and +// different metrics will always create different strings. In a way, it is the +// "ideal" fingerprint function, only that it is more expensive than the +// FastFingerprint function, and its result is not suitable as a key for maps +// and indexes as it might become really large, causing a lot of hashing effort +// in maps and a lot of storage overhead in indexes. +func metricToUniqueString(m labelPairs) string { + parts := make([]string, 0, len(m)) + for _, pair := range m { + parts = append(parts, string(pair.Name)+separatorString+string(pair.Value)) + } + sort.Strings(parts) + return strings.Join(parts, separatorString) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go new file mode 100644 index 000000000000..f1b9548902b6 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go @@ -0,0 +1,290 @@ +package ingester + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + memSeriesCreatedTotalName = "cortex_ingester_memory_series_created_total" + memSeriesCreatedTotalHelp = "The total number of series that were created per user." + + memSeriesRemovedTotalName = "cortex_ingester_memory_series_removed_total" + memSeriesRemovedTotalHelp = "The total number of series that were removed per user." +) + +type ingesterMetrics struct { + flushQueueLength prometheus.Gauge + ingestedSamples prometheus.Counter + ingestedSamplesFail prometheus.Counter + queries prometheus.Counter + queriedSamples prometheus.Histogram + queriedSeries prometheus.Histogram + queriedChunks prometheus.Histogram + memSeries prometheus.Gauge + memUsers prometheus.Gauge + memSeriesCreatedTotal *prometheus.CounterVec + memSeriesRemovedTotal *prometheus.CounterVec + createdChunks prometheus.Counter + walReplayDuration prometheus.Gauge + walCorruptionsTotal prometheus.Counter + + // Chunks / blocks transfer. + sentChunks prometheus.Counter + receivedChunks prometheus.Counter + sentFiles prometheus.Counter + receivedFiles prometheus.Counter + receivedBytes prometheus.Counter + sentBytes prometheus.Counter + + // Chunks flushing. 
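metricToUniqueString above produces a collision-free (though unbounded-length) key by sorting the name/value parts and joining them with a separator byte that cannot occur in label text. The same construction on plain string pairs, with a stand-in separator:

package main

import (
	"fmt"
	"sort"
	"strings"
)

const sep = "\xff" // stand-in for model.SeparatorByte, which can't occur in label text

func uniqueKey(pairs map[string]string) string {
	parts := make([]string, 0, len(pairs))
	for name, value := range pairs {
		parts = append(parts, name+sep+value)
	}
	sort.Strings(parts) // order-independent: same metric -> same key
	return strings.Join(parts, sep)
}

func main() {
	a := uniqueKey(map[string]string{"job": "api", "env": "prod"})
	b := uniqueKey(map[string]string{"env": "prod", "job": "api"})
	fmt.Println(a == b) // true
}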
+ chunkUtilization prometheus.Histogram + chunkLength prometheus.Histogram + chunkSize prometheus.Histogram + chunksPerUser *prometheus.CounterVec + chunkSizePerUser *prometheus.CounterVec + chunkAge prometheus.Histogram + memoryChunks prometheus.Gauge + flushReasons *prometheus.CounterVec + droppedChunks prometheus.Counter + oldestUnflushedChunkTimestamp prometheus.Gauge +} + +func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSDB bool) *ingesterMetrics { + m := &ingesterMetrics{ + flushQueueLength: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ingester_flush_queue_length", + Help: "The total number of series pending in the flush queue.", + }), + ingestedSamples: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_ingested_samples_total", + Help: "The total number of samples ingested.", + }), + ingestedSamplesFail: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_ingested_samples_failures_total", + Help: "The total number of samples that errored on ingestion.", + }), + queries: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_queries_total", + Help: "The total number of queries the ingester has handled.", + }), + queriedSamples: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_queried_samples", + Help: "The total number of samples returned from queries.", + // Could easily return 10m samples per query - 10*(8^(8-1)) = 20.9m. + Buckets: prometheus.ExponentialBuckets(10, 8, 8), + }), + queriedSeries: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_queried_series", + Help: "The total number of series returned from queries.", + // A reasonable upper bound is around 100k - 10*(8^(6-1)) = 327k. + Buckets: prometheus.ExponentialBuckets(10, 8, 6), + }), + queriedChunks: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_queried_chunks", + Help: "The total number of chunks returned from queries.", + // A small number of chunks per series - 10*(8^(7-1)) = 2.6m. + Buckets: prometheus.ExponentialBuckets(10, 8, 7), + }), + memSeries: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ingester_memory_series", + Help: "The current number of series in memory.", + }), + memUsers: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ingester_memory_users", + Help: "The current number of users in memory.", + }), + createdChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_chunks_created_total", + Help: "The total number of chunks the ingester has created.", + }), + walReplayDuration: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ingester_wal_replay_duration_seconds", + Help: "Time taken to replay the checkpoint and the WAL.", + }), + walCorruptionsTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_wal_corruptions_total", + Help: "Total number of WAL corruptions encountered.", + }), + + // Chunks / blocks transfer. 
+ sentChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_sent_chunks", + Help: "The total number of chunks sent by this ingester whilst leaving.", + }), + receivedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_received_chunks", + Help: "The total number of chunks received by this ingester whilst joining", + }), + sentFiles: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_sent_files", + Help: "The total number of files sent by this ingester whilst leaving.", + }), + receivedFiles: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_received_files", + Help: "The total number of files received by this ingester whilst joining", + }), + receivedBytes: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_received_bytes_total", + Help: "The total number of bytes received by this ingester whilst joining", + }), + sentBytes: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_sent_bytes_total", + Help: "The total number of bytes sent by this ingester whilst leaving", + }), + + // Chunks flushing. + chunkUtilization: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_chunk_utilization", + Help: "Distribution of stored chunk utilization (when stored).", + Buckets: prometheus.LinearBuckets(0, 0.2, 6), + }), + chunkLength: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_chunk_length", + Help: "Distribution of stored chunk lengths (when stored).", + Buckets: prometheus.ExponentialBuckets(5, 2, 11), // biggest bucket is 5*2^(11-1) = 5120 + }), + chunkSize: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_chunk_size_bytes", + Help: "Distribution of stored chunk sizes (when stored).", + Buckets: prometheus.ExponentialBuckets(500, 2, 5), // biggest bucket is 500*2^(5-1) = 8000 + }), + chunksPerUser: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_chunks_stored_total", + Help: "Total stored chunks per user.", + }, []string{"user"}), + chunkSizePerUser: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_chunk_stored_bytes_total", + Help: "Total bytes stored in chunks per user.", + }, []string{"user"}), + chunkAge: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_ingester_chunk_age_seconds", + Help: "Distribution of chunk ages (when stored).", + // with default settings chunks should flush between 5 min and 12 hours + // so buckets at 1min, 5min, 10min, 30min, 1hr, 2hr, 4hr, 10hr, 12hr, 16hr + Buckets: []float64{60, 300, 600, 1800, 3600, 7200, 14400, 36000, 43200, 57600}, + }), + memoryChunks: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_ingester_memory_chunks", + Help: "The total number of chunks in memory.", + }), + flushReasons: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_flush_reasons", + Help: "Total number of series scheduled for flushing, with reasons.", + }, []string{"reason"}), + droppedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_dropped_chunks_total", + Help: "Total number of chunks dropped from flushing because they have too few samples.", + }), + oldestUnflushedChunkTimestamp: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Name: "cortex_oldest_unflushed_chunk_timestamp_seconds", + Help: "Unix timestamp of the oldest unflushed chunk in the memory", + }), 
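The bucket comments above each state the largest bucket as start × factor^(count−1); a quick check of that arithmetic with the same client_golang helper used throughout this file:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// 10 * 8^(8-1) = 20,971,520 — the "20.9m" in the queriedSamples comment.
	b := prometheus.ExponentialBuckets(10, 8, 8)
	fmt.Println(b[len(b)-1]) // 2.097152e+07

	// 500 * 2^(5-1) = 8000 — the chunkSize upper bucket.
	fmt.Println(prometheus.ExponentialBuckets(500, 2, 5)[4]) // 8000
}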
+ } + + if createMetricsConflictingWithTSDB { + m.memSeriesCreatedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: memSeriesCreatedTotalName, + Help: memSeriesCreatedTotalHelp, + }, []string{"user"}) + + m.memSeriesRemovedTotal = promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: memSeriesRemovedTotalName, + Help: memSeriesRemovedTotalHelp, + }, []string{"user"}) + } + + return m +} + +// TSDB metrics collector. Each tenant has its own registry, that TSDB code uses. +type tsdbMetrics struct { + // We aggregate metrics from individual TSDB registries into + // a single set of counters, which are exposed as Cortex metrics. + dirSyncs *prometheus.Desc // sum(thanos_shipper_dir_syncs_total) + dirSyncFailures *prometheus.Desc // sum(thanos_shipper_dir_sync_failures_total) + uploads *prometheus.Desc // sum(thanos_shipper_uploads_total) + uploadFailures *prometheus.Desc // sum(thanos_shipper_upload_failures_total) + + // These two metrics replace metrics in ingesterMetrics, as we count them differently + memSeriesCreatedTotal *prometheus.Desc + memSeriesRemovedTotal *prometheus.Desc + + regsMu sync.RWMutex // custom mutex for shipper registry, to avoid blocking main user state mutex on collection + regs map[string]*prometheus.Registry // One prometheus registry per tenant +} + +func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { + m := &tsdbMetrics{ + regs: make(map[string]*prometheus.Registry), + + dirSyncs: prometheus.NewDesc( + "cortex_ingester_shipper_dir_syncs_total", + "TSDB: Total number of dir syncs", + nil, nil), + dirSyncFailures: prometheus.NewDesc( + "cortex_ingester_shipper_dir_sync_failures_total", + "TSDB: Total number of failed dir syncs", + nil, nil), + uploads: prometheus.NewDesc( + "cortex_ingester_shipper_uploads_total", + "TSDB: Total number of uploaded blocks", + nil, nil), + uploadFailures: prometheus.NewDesc( + "cortex_ingester_shipper_upload_failures_total", + "TSDB: Total number of block upload failures", + nil, nil), + + memSeriesCreatedTotal: prometheus.NewDesc(memSeriesCreatedTotalName, memSeriesCreatedTotalHelp, []string{"user"}, nil), + memSeriesRemovedTotal: prometheus.NewDesc(memSeriesRemovedTotalName, memSeriesRemovedTotalHelp, []string{"user"}, nil), + } + + if r != nil { + r.MustRegister(m) + } + return m +} + +func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { + out <- sm.dirSyncs + out <- sm.dirSyncFailures + out <- sm.uploads + out <- sm.uploadFailures + out <- sm.memSeriesCreatedTotal + out <- sm.memSeriesRemovedTotal +} + +func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { + data := util.BuildMetricFamiliesPerUserFromUserRegistries(sm.registries()) + + // OK, we have it all. Let's build results. + data.SendSumOfCounters(out, sm.dirSyncs, "thanos_shipper_dir_syncs_total") + data.SendSumOfCounters(out, sm.dirSyncFailures, "thanos_shipper_dir_sync_failures_total") + data.SendSumOfCounters(out, sm.uploads, "thanos_shipper_uploads_total") + data.SendSumOfCounters(out, sm.uploadFailures, "thanos_shipper_upload_failures_total") + + data.SendSumOfCountersPerUser(out, sm.memSeriesCreatedTotal, "prometheus_tsdb_head_series_created_total") + data.SendSumOfCountersPerUser(out, sm.memSeriesRemovedTotal, "prometheus_tsdb_head_series_removed_total") +} + +// make a copy of the map, so that metrics can be gathered while the new registry is being added. 
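Collect above is the custom-collector pattern: per-tenant registries are gathered and re-exported as aggregated metrics under stable descriptors. A much-reduced sketch of the same shape, summing from a plain map instead of per-tenant registries (the metric name here is illustrative only):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// sumCollector re-exports per-tenant totals as one aggregate series,
// the same shape as tsdbMetrics above.
type sumCollector struct {
	desc      *prometheus.Desc
	perTenant map[string]float64
}

func (c *sumCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *sumCollector) Collect(ch chan<- prometheus.Metric) {
	sum := 0.0
	for _, v := range c.perTenant {
		sum += v
	}
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.CounterValue, sum)
}

func main() {
	c := &sumCollector{
		desc:      prometheus.NewDesc("uploads_total", "Aggregated uploads.", nil, nil),
		perTenant: map[string]float64{"user-1": 3, "user-2": 4},
	}
	reg := prometheus.NewRegistry()
	reg.MustRegister(c)
	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetMetric()[0].GetCounter().GetValue()) // 7
}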
+func (sm *tsdbMetrics) registries() map[string]*prometheus.Registry { + sm.regsMu.RLock() + defer sm.regsMu.RUnlock() + + regs := make(map[string]*prometheus.Registry, len(sm.regs)) + for u, r := range sm.regs { + regs[u] = r + } + return regs +} + +func (sm *tsdbMetrics) setRegistryForUser(userID string, registry *prometheus.Registry) { + sm.regsMu.Lock() + sm.regs[userID] = registry + sm.regsMu.Unlock() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go new file mode 100644 index 000000000000..d0c348677b4e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go @@ -0,0 +1,57 @@ +package ingester + +import ( + "sync" + "sync/atomic" + "time" +) + +// ewmaRate tracks an exponentially weighted moving average of a per-second rate. +type ewmaRate struct { + newEvents int64 + alpha float64 + interval time.Duration + lastRate float64 + init bool + mutex sync.Mutex +} + +func newEWMARate(alpha float64, interval time.Duration) *ewmaRate { + return &ewmaRate{ + alpha: alpha, + interval: interval, + } +} + +// rate returns the per-second rate. +func (r *ewmaRate) rate() float64 { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.lastRate +} + +// tick assumes to be called every r.interval. +func (r *ewmaRate) tick() { + newEvents := atomic.LoadInt64(&r.newEvents) + atomic.AddInt64(&r.newEvents, -newEvents) + instantRate := float64(newEvents) / r.interval.Seconds() + + r.mutex.Lock() + defer r.mutex.Unlock() + + if r.init { + r.lastRate += r.alpha * (instantRate - r.lastRate) + } else { + r.init = true + r.lastRate = instantRate + } +} + +// inc counts one event. +func (r *ewmaRate) inc() { + atomic.AddInt64(&r.newEvents, 1) +} + +func (r *ewmaRate) add(delta int64) { + atomic.AddInt64(&r.newEvents, delta) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/series.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/series.go new file mode 100644 index 000000000000..35707284ca22 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/series.go @@ -0,0 +1,260 @@ +package ingester + +import ( + "fmt" + "sort" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" + + "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/prom1/storage/metric" +) + +const ( + sampleOutOfOrder = "sample-out-of-order" + newValueForTimestamp = "new-value-for-timestamp" + sampleOutOfBounds = "sample-out-of-bounds" + duplicateSample = "duplicate-sample" + duplicateTimestamp = "duplicate-timestamp" +) + +type memorySeries struct { + metric labels.Labels + + // Sorted by start time, overlapping chunk ranges are forbidden. + chunkDescs []*desc + + // Whether the current head chunk has already been finished. If true, + // the current head chunk must not be modified anymore. + headChunkClosed bool + + // The timestamp & value of the last sample in this series. Needed to + // ensure timestamp monotonicity during ingestion. + lastSampleValueSet bool + lastTime model.Time + lastSampleValue model.SampleValue + + // Prometheus metrics. + createdChunks prometheus.Counter +} + +// newMemorySeries returns a pointer to a newly allocated memorySeries for the +// given metric. 
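tick in rate.go above applies the standard EWMA update, rate ← rate + α·(instant − rate). With α = 0.2, as configured for the per-user sample rates earlier in this diff, each tick closes 20% of the gap to the new instant rate; a worked example:

package main

import "fmt"

func main() {
	const alpha = 0.2
	rate := 100.0 // initialised to the first instant rate
	for _, instant := range []float64{100, 200, 200, 200} {
		rate += alpha * (instant - rate)
		fmt.Printf("%.1f\n", rate)
	}
	// Prints 100.0, 120.0, 136.0, 148.8 — each tick closes 20%
	// of the gap to the new instant rate.
}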
+func newMemorySeries(m labels.Labels, createdChunks prometheus.Counter) *memorySeries { + return &memorySeries{ + metric: m, + lastTime: model.Earliest, + createdChunks: createdChunks, + } +} + +// add adds a sample pair to the series, possibly creating a new chunk. +// The caller must have locked the fingerprint of the series. +func (s *memorySeries) add(v model.SamplePair) error { + // If sender has repeated the same timestamp, check more closely and perhaps return error. + if v.Timestamp == s.lastTime { + // If we don't know what the last sample value is, silently discard. + // This will mask some errors but better than complaining when we don't really know. + if !s.lastSampleValueSet { + return makeNoReportError(duplicateTimestamp) + } + // If both timestamp and sample value are the same as for the last append, + // ignore as they are a common occurrence when using client-side timestamps + // (e.g. Pushgateway or federation). + if v.Value.Equal(s.lastSampleValue) { + return makeNoReportError(duplicateSample) + } + return makeMetricValidationError(newValueForTimestamp, s.metric, + fmt.Errorf("sample with repeated timestamp but different value; last value: %v, incoming value: %v", s.lastSampleValue, v.Value)) + } + if v.Timestamp < s.lastTime { + return makeMetricValidationError(sampleOutOfOrder, s.metric, + fmt.Errorf("sample timestamp out of order; last timestamp: %v, incoming timestamp: %v", s.lastTime, v.Timestamp)) + } + + if len(s.chunkDescs) == 0 || s.headChunkClosed { + newHead := newDesc(encoding.New(), v.Timestamp, v.Timestamp) + s.chunkDescs = append(s.chunkDescs, newHead) + s.headChunkClosed = false + s.createdChunks.Inc() + } + + newChunk, err := s.head().add(v) + if err != nil { + return err + } + + // If we get a single chunk result, then just replace the head chunk with it + // (no need to update first/last time). Otherwise, we'll need to update first + // and last time. + if newChunk != nil { + first, last, err := firstAndLastTimes(newChunk) + if err != nil { + return err + } + s.chunkDescs = append(s.chunkDescs, newDesc(newChunk, first, last)) + s.createdChunks.Inc() + } + + s.lastTime = v.Timestamp + s.lastSampleValue = v.Value + s.lastSampleValueSet = true + + return nil +} + +func firstAndLastTimes(c encoding.Chunk) (model.Time, model.Time, error) { + var ( + first model.Time + last model.Time + firstSet bool + iter = c.NewIterator(nil) + ) + for iter.Scan() { + sample := iter.Value() + if !firstSet { + first = sample.Timestamp + firstSet = true + } + last = sample.Timestamp + } + return first, last, iter.Err() +} + +// closeHead marks the head chunk closed. The caller must have locked +// the fingerprint of the memorySeries. This method will panic if this +// series has no chunk descriptors. +func (s *memorySeries) closeHead(reason flushReason) { + s.chunkDescs[0].flushReason = reason + s.headChunkClosed = true +} + +// firstTime returns the earliest known time for the series. The caller must have +// locked the fingerprint of the memorySeries. This method will panic if this +// series has no chunk descriptors. +func (s *memorySeries) firstTime() model.Time { + return s.chunkDescs[0].FirstTime +} + +// Returns time of oldest chunk in the series, that isn't flushed. If there are +// no chunks, or all chunks are flushed, returns 0. +// The caller must have locked the fingerprint of the memorySeries. 
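memorySeries.add above distinguishes three outcomes for a repeated timestamp: silently drop when the last value is unknown, silently drop an exact duplicate (common with client-side timestamps), and reject a conflicting value. A compact sketch of that decision table, with simplified types and error values named after the reason constants above:

package main

import (
	"errors"
	"fmt"
)

var (
	errOutOfOrder = errors.New("sample-out-of-order")
	errNewValue   = errors.New("new-value-for-timestamp")
	errDupSample  = errors.New("duplicate-sample") // dropped without reporting, as above
)

func validate(lastT int64, lastV float64, lastSet bool, t int64, v float64) error {
	switch {
	case t < lastT:
		return errOutOfOrder
	case t == lastT && !lastSet:
		return errDupSample // don't know the last value: silently discard
	case t == lastT && v == lastV:
		return errDupSample // benign re-send of the same sample
	case t == lastT:
		return errNewValue // same timestamp, different value: reject
	}
	return nil
}

func main() {
	fmt.Println(validate(10, 1.0, true, 10, 2.0)) // new-value-for-timestamp
}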
+func (s *memorySeries) firstUnflushedChunkTime() model.Time { + for _, c := range s.chunkDescs { + if !c.flushed { + return c.FirstTime + } + } + + return 0 +} + +// head returns a pointer to the head chunk descriptor. The caller must have +// locked the fingerprint of the memorySeries. This method will panic if this +// series has no chunk descriptors. +func (s *memorySeries) head() *desc { + return s.chunkDescs[len(s.chunkDescs)-1] +} + +func (s *memorySeries) samplesForRange(from, through model.Time) ([]model.SamplePair, error) { + // Find first chunk with start time after "from". + fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool { + return s.chunkDescs[i].FirstTime.After(from) + }) + // Find first chunk with start time after "through". + throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool { + return s.chunkDescs[i].FirstTime.After(through) + }) + if fromIdx == len(s.chunkDescs) { + // Even the last chunk starts before "from". Find out if the + // series ends before "from" and we don't need to do anything. + lt := s.chunkDescs[len(s.chunkDescs)-1].LastTime + if lt.Before(from) { + return nil, nil + } + } + if fromIdx > 0 { + fromIdx-- + } + if throughIdx == len(s.chunkDescs) { + throughIdx-- + } + var values []model.SamplePair + in := metric.Interval{ + OldestInclusive: from, + NewestInclusive: through, + } + var reuseIter encoding.Iterator + for idx := fromIdx; idx <= throughIdx; idx++ { + cd := s.chunkDescs[idx] + reuseIter = cd.C.NewIterator(reuseIter) + chValues, err := encoding.RangeValues(reuseIter, in) + if err != nil { + return nil, err + } + values = append(values, chValues...) + } + return values, nil +} + +func (s *memorySeries) setChunks(descs []*desc) error { + if len(s.chunkDescs) != 0 { + return fmt.Errorf("series already has chunks") + } + + s.chunkDescs = descs + if len(descs) > 0 { + s.lastTime = descs[len(descs)-1].LastTime + } + return nil +} + +func (s *memorySeries) isStale() bool { + return s.lastSampleValueSet && value.IsStaleNaN(float64(s.lastSampleValue)) +} + +type desc struct { + C encoding.Chunk // nil if chunk is evicted. + FirstTime model.Time // Timestamp of first sample. Populated at creation. Immutable. + LastTime model.Time // Timestamp of last sample. Populated at creation & on append. + LastUpdate model.Time // This server's local time on last change + flushReason flushReason // If chunk is closed, holds the reason why. + flushed bool // set to true when flush succeeds +} + +func newDesc(c encoding.Chunk, firstTime model.Time, lastTime model.Time) *desc { + return &desc{ + C: c, + FirstTime: firstTime, + LastTime: lastTime, + LastUpdate: model.Now(), + } +} + +// Add adds a sample pair to the underlying chunk. For safe concurrent access, +// The chunk must be pinned, and the caller must have locked the fingerprint of +// the series. 
+func (d *desc) add(s model.SamplePair) (encoding.Chunk, error) { + cs, err := d.C.Add(s) + if err != nil { + return nil, err + } + + if cs == nil { + d.LastTime = s.Timestamp // sample was added to this chunk + d.LastUpdate = model.Now() + } + + return cs, nil +} + +func (d *desc) slice(start, end model.Time) *desc { + return &desc{ + C: d.C.Slice(start, end), + FirstTime: start, + LastTime: end, + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go new file mode 100644 index 000000000000..a8e4ba70613b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go @@ -0,0 +1,110 @@ +package ingester + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/prometheus/common/model" + + "github.com/cortexproject/cortex/pkg/util" +) + +const seriesMapShards = 128 + +// seriesMap maps fingerprints to memory series. All its methods are +// goroutine-safe. A seriesMap is effectively a goroutine-safe version of +// map[model.Fingerprint]*memorySeries. +type seriesMap struct { + size int32 + shards []shard +} + +type shard struct { + mtx sync.Mutex + m map[model.Fingerprint]*memorySeries + + // Align this struct. + _ [cacheLineSize - unsafe.Sizeof(sync.Mutex{}) - unsafe.Sizeof(map[model.Fingerprint]*memorySeries{})]byte +} + +// fingerprintSeriesPair pairs a fingerprint with a memorySeries pointer. +type fingerprintSeriesPair struct { + fp model.Fingerprint + series *memorySeries +} + +// newSeriesMap returns a newly allocated empty seriesMap. To create a seriesMap +// based on a prefilled map, use an explicit initializer. +func newSeriesMap() *seriesMap { + shards := make([]shard, seriesMapShards) + for i := 0; i < seriesMapShards; i++ { + shards[i].m = map[model.Fingerprint]*memorySeries{} + } + return &seriesMap{ + shards: shards, + } +} + +// get returns a memorySeries for a fingerprint. Return values have the same +// semantics as the native Go map. +func (sm *seriesMap) get(fp model.Fingerprint) (*memorySeries, bool) { + shard := &sm.shards[util.HashFP(fp)%seriesMapShards] + shard.mtx.Lock() + ms, ok := shard.m[fp] + shard.mtx.Unlock() + return ms, ok +} + +// put adds a mapping to the seriesMap. +func (sm *seriesMap) put(fp model.Fingerprint, s *memorySeries) { + shard := &sm.shards[util.HashFP(fp)%seriesMapShards] + shard.mtx.Lock() + _, ok := shard.m[fp] + shard.m[fp] = s + shard.mtx.Unlock() + + if !ok { + atomic.AddInt32(&sm.size, 1) + } +} + +// del removes a mapping from the series Map. +func (sm *seriesMap) del(fp model.Fingerprint) { + shard := &sm.shards[util.HashFP(fp)%seriesMapShards] + shard.mtx.Lock() + _, ok := shard.m[fp] + delete(shard.m, fp) + shard.mtx.Unlock() + if ok { + atomic.AddInt32(&sm.size, -1) + } +} + +// iter returns a channel that produces all mappings in the seriesMap. The +// channel will be closed once all fingerprints have been received. Not +// consuming all fingerprints from the channel will leak a goroutine. The +// semantics of concurrent modification of seriesMap is the similar as the one +// for iterating over a map with a 'range' clause. However, if the next element +// in iteration order is removed after the current element has been received +// from the channel, it will still be produced by the channel. 
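seriesMap above is a lock-striped map: the fingerprint hash picks one of 128 shards, each with its own mutex, and the total size is maintained atomically so length() never takes a lock. A reduced sketch keyed by string, with a stdlib FNV hash standing in for util.HashFP:

package main

import (
	"fmt"
	"hash/fnv"
	"sync"
	"sync/atomic"
)

const numShards = 128

type shard struct {
	mtx sync.Mutex
	m   map[string]int
}

type shardedMap struct {
	size   int32 // updated atomically so length() needs no lock
	shards [numShards]shard
}

func newShardedMap() *shardedMap {
	sm := &shardedMap{}
	for i := range sm.shards {
		sm.shards[i].m = map[string]int{}
	}
	return sm
}

func (sm *shardedMap) shardFor(key string) *shard {
	h := fnv.New32a()
	h.Write([]byte(key))
	return &sm.shards[h.Sum32()%numShards]
}

func (sm *shardedMap) put(key string, v int) {
	s := sm.shardFor(key)
	s.mtx.Lock()
	_, existed := s.m[key]
	s.m[key] = v
	s.mtx.Unlock()
	if !existed {
		atomic.AddInt32(&sm.size, 1)
	}
}

func (sm *shardedMap) length() int { return int(atomic.LoadInt32(&sm.size)) }

func main() {
	sm := newShardedMap()
	sm.put("series-a", 1)
	sm.put("series-a", 2)
	fmt.Println(sm.length()) // 1
}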
+func (sm *seriesMap) iter() <-chan fingerprintSeriesPair { + ch := make(chan fingerprintSeriesPair) + go func() { + for i := range sm.shards { + sm.shards[i].mtx.Lock() + for fp, ms := range sm.shards[i].m { + sm.shards[i].mtx.Unlock() + ch <- fingerprintSeriesPair{fp, ms} + sm.shards[i].mtx.Lock() + } + sm.shards[i].mtx.Unlock() + } + close(ch) + }() + return ch +} + +func (sm *seriesMap) length() int { + return int(atomic.LoadInt32(&sm.size)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go new file mode 100644 index 000000000000..488e7bbe8912 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go @@ -0,0 +1,793 @@ +package ingester + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/thanos-io/thanos/pkg/shipper" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/util" +) + +var ( + errTransferNoPendingIngesters = errors.New("no pending ingesters") +) + +// returns source ingesterID, number of received series, added chunks and error +func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream client.Ingester_TransferChunksServer) (fromIngesterID string, seriesReceived int, retErr error) { + chunksAdded := 0.0 + + defer func() { + if retErr != nil { + // Ensure the in memory chunks are updated to reflect the number of dropped chunks from the transfer + i.metrics.memoryChunks.Sub(chunksAdded) + + // If an error occurs during the transfer and the user state is to be discarded, + // ensure the metrics it exports reflect this. + userStates.teardown() + } + }() + + for { + wireSeries, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + retErr = errors.Wrap(err, "TransferChunks: Recv") + return + } + + // We can't send "extra" fields with a streaming call, so we repeat + // wireSeries.FromIngesterId and assume it is the same every time + // round this loop. 
+ if fromIngesterID == "" { + fromIngesterID = wireSeries.FromIngesterId + level.Info(util.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) + + // Before transfer, make sure 'from' ingester is in correct state to call ClaimTokensFor later + err := i.checkFromIngesterIsInLeavingState(stream.Context(), fromIngesterID) + if err != nil { + retErr = errors.Wrap(err, "TransferChunks: checkFromIngesterIsInLeavingState") + return + } + } + descs, err := fromWireChunks(wireSeries.Chunks) + if err != nil { + retErr = errors.Wrap(err, "TransferChunks: fromWireChunks") + return + } + + state, fp, series, err := userStates.getOrCreateSeries(stream.Context(), wireSeries.UserId, wireSeries.Labels, nil) + if err != nil { + retErr = errors.Wrapf(err, "TransferChunks: getOrCreateSeries: user %s series %s", wireSeries.UserId, wireSeries.Labels) + return + } + prevNumChunks := len(series.chunkDescs) + + err = series.setChunks(descs) + state.fpLocker.Unlock(fp) // acquired in getOrCreateSeries + if err != nil { + retErr = errors.Wrapf(err, "TransferChunks: setChunks: user %s series %s", wireSeries.UserId, wireSeries.Labels) + return + } + + seriesReceived++ + chunksDelta := float64(len(series.chunkDescs) - prevNumChunks) + chunksAdded += chunksDelta + i.metrics.memoryChunks.Add(chunksDelta) + i.metrics.receivedChunks.Add(float64(len(descs))) + } + + if seriesReceived == 0 { + level.Error(util.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) + retErr = fmt.Errorf("TransferChunks: no series") + return + } + + if fromIngesterID == "" { + level.Error(util.Logger).Log("msg", "received TransferChunks request with no ID from ingester") + retErr = fmt.Errorf("no ingester id") + return + } + + if err := i.lifecycler.ClaimTokensFor(stream.Context(), fromIngesterID); err != nil { + retErr = errors.Wrap(err, "TransferChunks: ClaimTokensFor") + return + } + + return +} + +// TransferChunks receives all the chunks from another ingester. +func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) error { + fromIngesterID := "" + seriesReceived := 0 + + xfer := func() error { + userStates := newUserStates(i.limiter, i.cfg, i.metrics) + + var err error + fromIngesterID, seriesReceived, err = i.fillUserStatesFromStream(userStates, stream) + + if err != nil { + return err + } + + i.userStatesMtx.Lock() + defer i.userStatesMtx.Unlock() + + i.userStates = userStates + + return nil + } + + if err := i.transfer(stream.Context(), xfer); err != nil { + return err + } + + // Close the stream last, as this is what tells the "from" ingester that + // it's OK to shut down. + if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil { + level.Error(util.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) + return err + } + level.Info(util.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) + + return nil +} + +// Ring gossiping: check if "from" ingester is in LEAVING state. It should be, but we may not see that yet +// when using gossip ring. If we cannot see ingester is the LEAVING state yet, we don't accept this +// transfer, as claiming tokens would possibly end up with this ingester owning no tokens, due to conflict +// resolution in ring merge function. Hopefully the leaving ingester will retry transfer again. 
+func (i *Ingester) checkFromIngesterIsInLeavingState(ctx context.Context, fromIngesterID string) error { + v, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey) + if err != nil { + return errors.Wrap(err, "get ring") + } + if v == nil { + return fmt.Errorf("ring not found when checking state of source ingester") + } + r, ok := v.(*ring.Desc) + if !ok || r == nil { + return fmt.Errorf("ring not found, got %T", v) + } + + if r.Ingesters == nil || r.Ingesters[fromIngesterID].State != ring.LEAVING { + return fmt.Errorf("source ingester is not in a LEAVING state, found state=%v", r.Ingesters[fromIngesterID].State) + } + + // all fine + return nil +} + +func (i *Ingester) transfer(ctx context.Context, xfer func() error) error { + // Enter JOINING state (only valid from PENDING) + if err := i.lifecycler.ChangeState(ctx, ring.JOINING); err != nil { + return err + } + + // The ingesters state effectively works as a giant mutex around this whole + // method, and as such we have to ensure we unlock the mutex. + defer func() { + state := i.lifecycler.GetState() + if i.lifecycler.GetState() == ring.ACTIVE { + return + } + + level.Error(util.Logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state) + + // Enter PENDING state (only valid from JOINING) + if i.lifecycler.GetState() == ring.JOINING { + if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil { + level.Error(util.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err) + os.Exit(1) + } + } + }() + + if err := xfer(); err != nil { + return err + } + + if err := i.lifecycler.ChangeState(ctx, ring.ACTIVE); err != nil { + return errors.Wrap(err, "Transfer: ChangeState") + } + + return nil +} + +// TransferTSDB receives all the file chunks from another ingester, and writes them to tsdb directories +func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error { + fromIngesterID := "" + + xfer := func() error { + + // Validate the final directory is empty, if it exists and is empty delete it so a move can succeed + err := removeEmptyDir(i.cfg.TSDBConfig.Dir) + if err != nil { + return errors.Wrap(err, "remove existing TSDB directory") + } + + tmpDir, err := ioutil.TempDir("", "tsdb_xfer") + if err != nil { + return errors.Wrap(err, "unable to create temporary directory to store transferred TSDB blocks") + } + defer os.RemoveAll(tmpDir) + + bytesXfer := 0 + filesXfer := 0 + + files := make(map[string]*os.File) + defer func() { + for _, f := range files { + if err := f.Close(); err != nil { + level.Warn(util.Logger).Log("msg", "failed to close xfer file", "err", err) + } + } + }() + for { + f, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "TransferTSDB: Recv") + } + if fromIngesterID == "" { + fromIngesterID = f.FromIngesterId + level.Info(util.Logger).Log("msg", "processing TransferTSDB request", "from_ingester", fromIngesterID) + + // Before transfer, make sure 'from' ingester is in correct state to call ClaimTokensFor later + err := i.checkFromIngesterIsInLeavingState(stream.Context(), fromIngesterID) + if err != nil { + return errors.Wrap(err, "TransferTSDB: checkFromIngesterIsInLeavingState") + } + } + bytesXfer += len(f.Data) + + createfile := func(f *client.TimeSeriesFile) (*os.File, error) { + dir := filepath.Join(tmpDir, filepath.Dir(f.Filename)) + if err := os.MkdirAll(dir, 0777); err != nil { + return nil, errors.Wrap(err, "TransferTSDB: MkdirAll") + } + file, err := os.Create(filepath.Join(tmpDir, 
f.Filename)) + if err != nil { + return nil, errors.Wrap(err, "TransferTSDB: Create") + } + + _, err = file.Write(f.Data) + return file, errors.Wrap(err, "TransferTSDB: Write") + } + + // Create or get existing open file + file, ok := files[f.Filename] + if !ok { + file, err = createfile(f) + if err != nil { + return errors.Wrapf(err, "unable to create file %s to store incoming TSDB block", f) + } + filesXfer++ + files[f.Filename] = file + } else { + + // Write to existing file + if _, err := file.Write(f.Data); err != nil { + return errors.Wrap(err, "TransferTSDB: Write") + } + } + } + + if err := i.lifecycler.ClaimTokensFor(stream.Context(), fromIngesterID); err != nil { + return errors.Wrap(err, "TransferTSDB: ClaimTokensFor") + } + + i.metrics.receivedBytes.Add(float64(bytesXfer)) + i.metrics.receivedFiles.Add(float64(filesXfer)) + level.Info(util.Logger).Log("msg", "Total xfer", "from_ingester", fromIngesterID, "files", filesXfer, "bytes", bytesXfer) + + // Move the tmpdir to the final location + err = os.Rename(tmpDir, i.cfg.TSDBConfig.Dir) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("unable to move received TSDB blocks from %s to %s", tmpDir, i.cfg.TSDBConfig.Dir)) + } + + // At this point all TSDBs have been received, so we can proceed loading TSDBs in memory. + // This is required because of two reasons: + // 1. No WAL replay performance penalty once the ingester switches to ACTIVE state + // 2. If a query is received on user X, for which the TSDB has been transferred, before + // the first series is ingested, if we don't open the TSDB the query will return an + // empty result (because the TSDB is opened only on first push or transfer) + userIDs, err := ioutil.ReadDir(i.cfg.TSDBConfig.Dir) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("unable to list TSDB users in %s", i.cfg.TSDBConfig.Dir)) + } + + for _, user := range userIDs { + userID := user.Name() + + level.Info(util.Logger).Log("msg", fmt.Sprintf("Loading TSDB for user %s", userID)) + _, err = i.getOrCreateTSDB(userID, true) + + if err != nil { + level.Error(util.Logger).Log("msg", fmt.Sprintf("Unable to load TSDB for user %s", userID), "err", err) + } else { + level.Info(util.Logger).Log("msg", fmt.Sprintf("Loaded TSDB for user %s", userID)) + } + } + + return nil + } + + if err := i.transfer(stream.Context(), xfer); err != nil { + return err + } + + // Close the stream last, as this is what tells the "from" ingester that + // it's OK to shut down. + if err := stream.SendAndClose(&client.TransferTSDBResponse{}); err != nil { + level.Error(util.Logger).Log("msg", "Error closing TransferTSDB stream", "from_ingester", fromIngesterID, "err", err) + return err + } + level.Info(util.Logger).Log("msg", "Successfully transferred tsdbs", "from_ingester", fromIngesterID) + + return nil +} + +// The passed wireChunks slice is for re-use. 
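+//
+// Aside (illustrative, not part of this change): toWireChunks and
+// fromWireChunks below are inverses, so a transfer is conceptually an encode
+// on the sending ingester and a decode on the receiver. A minimal round-trip
+// sketch (the helper name is hypothetical):
+//
+//	func roundTripChunks(series *memorySeries) ([]*desc, error) {
+//		wire, err := toWireChunks(series.chunkDescs, nil)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return fromWireChunks(wire)
+//	}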
+func toWireChunks(descs []*desc, wireChunks []client.Chunk) ([]client.Chunk, error) { + if cap(wireChunks) < len(descs) { + wireChunks = make([]client.Chunk, 0, len(descs)) + } + wireChunks = wireChunks[:0] + for _, d := range descs { + wireChunk := client.Chunk{ + StartTimestampMs: int64(d.FirstTime), + EndTimestampMs: int64(d.LastTime), + Encoding: int32(d.C.Encoding()), + } + + buf := bytes.NewBuffer(make([]byte, 0, d.C.Size())) + if err := d.C.Marshal(buf); err != nil { + return nil, err + } + + wireChunk.Data = buf.Bytes() + wireChunks = append(wireChunks, wireChunk) + } + return wireChunks, nil +} + +func fromWireChunks(wireChunks []client.Chunk) ([]*desc, error) { + descs := make([]*desc, 0, len(wireChunks)) + for _, c := range wireChunks { + desc := &desc{ + FirstTime: model.Time(c.StartTimestampMs), + LastTime: model.Time(c.EndTimestampMs), + LastUpdate: model.Now(), + } + + var err error + desc.C, err = encoding.NewForEncoding(encoding.Encoding(byte(c.Encoding))) + if err != nil { + return nil, err + } + + if err := desc.C.UnmarshalFromBuf(c.Data); err != nil { + return nil, err + } + + descs = append(descs, desc) + } + return descs, nil +} + +// TransferOut finds an ingester in PENDING state and transfers our chunks to it. +// Called as part of the ingester shutdown process. +func (i *Ingester) TransferOut(ctx context.Context) error { + if i.cfg.MaxTransferRetries <= 0 { + return ring.ErrTransferDisabled + } + backoff := util.NewBackoff(ctx, util.BackoffConfig{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 5 * time.Second, + MaxRetries: i.cfg.MaxTransferRetries, + }) + + // Keep track of the last error so that we can log it with the highest level + // once all retries have completed + var err error + + for backoff.Ongoing() { + err = i.transferOut(ctx) + if err == nil { + level.Info(util.Logger).Log("msg", "transfer successfully completed") + return nil + } + + level.Warn(util.Logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries) + + backoff.Wait() + } + + level.Error(util.Logger).Log("msg", "all transfer attempts failed", "err", err) + return backoff.Err() +} + +func (i *Ingester) transferOut(ctx context.Context) error { + if i.cfg.TSDBEnabled { + return i.v2TransferOut(ctx) + } + + userStatesCopy := i.userStates.cp() + if len(userStatesCopy) == 0 { + level.Info(util.Logger).Log("msg", "nothing to transfer") + return nil + } + + targetIngester, err := i.findTargetIngester(ctx) + if err != nil { + return fmt.Errorf("cannot find ingester to transfer chunks to: %w", err) + } + + level.Info(util.Logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr) + c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig) + if err != nil { + return err + } + defer c.Close() + + ctx = user.InjectOrgID(ctx, "-1") + stream, err := c.TransferChunks(ctx) + if err != nil { + return errors.Wrap(err, "TransferChunks") + } + + var chunks []client.Chunk + for userID, state := range userStatesCopy { + for pair := range state.fpToSeries.iter() { + state.fpLocker.Lock(pair.fp) + + if len(pair.series.chunkDescs) == 0 { // Nothing to send? 
+				state.fpLocker.Unlock(pair.fp)
+				continue
+			}
+
+			chunks, err = toWireChunks(pair.series.chunkDescs, chunks)
+			if err != nil {
+				state.fpLocker.Unlock(pair.fp)
+				return errors.Wrap(err, "toWireChunks")
+			}
+
+			err = client.SendTimeSeriesChunk(stream, &client.TimeSeriesChunk{
+				FromIngesterId: i.lifecycler.ID,
+				UserId:         userID,
+				Labels:         client.FromLabelsToLabelAdapters(pair.series.metric),
+				Chunks:         chunks,
+			})
+			state.fpLocker.Unlock(pair.fp)
+			if err != nil {
+				return errors.Wrap(err, "Send")
+			}
+
+			i.metrics.sentChunks.Add(float64(len(chunks)))
+		}
+	}
+
+	_, err = stream.CloseAndRecv()
+	if err != nil {
+		return errors.Wrap(err, "CloseAndRecv")
+	}
+
+	// Close & empty all the flush queues, to unblock waiting workers.
+	for _, flushQueue := range i.flushQueues {
+		flushQueue.DiscardAndClose()
+	}
+	i.flushQueuesDone.Wait()
+
+	level.Info(util.Logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr)
+	return nil
+}
+
+func (i *Ingester) v2TransferOut(ctx context.Context) error {
+	// Skip TSDB transfer if there are no DBs
+	i.userStatesMtx.RLock()
+	skip := len(i.TSDBState.dbs) == 0
+	i.userStatesMtx.RUnlock()
+
+	if skip {
+		level.Info(util.Logger).Log("msg", "the ingester has nothing to transfer")
+		return nil
+	}
+
+	// This transfer function may be called multiple times in case of error,
+	// until the max number of retries is reached. For this reason, we run
+	// some initialization only once.
+	i.TSDBState.transferOnce.Do(func() {
+		// In order to transfer TSDB WAL without closing the TSDB itself - which is a
+		// pre-requisite to continue serving read requests while transferring - we need
+		// to make sure no more series will be written to the TSDB. For this reason, we
+		// wait until all in-flight write requests have been completed. No new write
+		// requests will be accepted because the "stopped" flag has already been set.
+		level.Info(util.Logger).Log("msg", "waiting for in-flight write requests to complete")
+
+		// Do not use the parent context because we don't want to interrupt while waiting
+		// for in-flight requests to complete if the parent context is cancelled, given
+		// this logic runs only once.
+		waitCtx, waitCancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer waitCancel()
+
+		if err := util.WaitGroup(waitCtx, &i.TSDBState.inflightWriteReqs); err != nil {
+			level.Warn(util.Logger).Log("msg", "timeout expired while waiting for in-flight write requests to complete, transfer will continue anyway", "err", err)
+		}
+
+		// Before beginning the transfer, we need to make sure no WAL compaction will occur.
+		// If there's an on-going compaction, DisableCompactions() will wait until it has
+		// completed.
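+
+		// Aside (illustrative, not part of this change): util.WaitGroup above is
+		// assumed to implement the usual "wait with deadline" idiom, roughly:
+		//
+		//	func waitGroupWithContext(ctx context.Context, wg *sync.WaitGroup) error {
+		//		done := make(chan struct{})
+		//		go func() {
+		//			wg.Wait()
+		//			close(done)
+		//		}()
+		//		select {
+		//		case <-done:
+		//			return nil
+		//		case <-ctx.Done():
+		//			return ctx.Err()
+		//		}
+		//	}
+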
+ level.Info(util.Logger).Log("msg", "disabling compaction on all TSDBs") + + i.userStatesMtx.RLock() + wg := &sync.WaitGroup{} + wg.Add(len(i.TSDBState.dbs)) + + for _, userDB := range i.TSDBState.dbs { + go func(db *userTSDB) { + defer wg.Done() + db.DisableCompactions() + }(userDB) + } + + i.userStatesMtx.RUnlock() + wg.Wait() + }) + + // Look for a joining ingester to transfer blocks and WAL to + targetIngester, err := i.findTargetIngester(ctx) + if err != nil { + return errors.Wrap(err, "cannot find ingester to transfer blocks to") + } + + level.Info(util.Logger).Log("msg", "begin transferring TSDB blocks and WAL to joining ingester", "to_ingester", targetIngester.Addr) + c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig) + if err != nil { + return err + } + defer c.Close() + + ctx = user.InjectOrgID(ctx, "-1") + stream, err := c.TransferTSDB(ctx) + if err != nil { + return errors.Wrap(err, "TransferTSDB() has failed") + } + + // Grab a list of all blocks that need to be shipped + blocks, err := unshippedBlocks(i.cfg.TSDBConfig.Dir) + if err != nil { + return err + } + + for user, blockIDs := range blocks { + // Transfer the users TSDB + // TODO(thor) transferring users can be done concurrently + i.transferUser(ctx, stream, i.cfg.TSDBConfig.Dir, i.lifecycler.ID, user, blockIDs) + } + + _, err = stream.CloseAndRecv() + if err != nil { + return errors.Wrap(err, "CloseAndRecv") + } + + // The transfer out has been successfully completed. Now we should close + // all open TSDBs: the Close() will wait until all on-going read operations + // will be completed. + i.closeAllTSDB() + + return nil +} + +// findTargetIngester finds an ingester in PENDING state. +func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.IngesterDesc, error) { + ringDesc, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey) + if err != nil { + return nil, err + } else if ringDesc == nil { + return nil, errTransferNoPendingIngesters + } + + ingesters := ringDesc.(*ring.Desc).FindIngestersByState(ring.PENDING) + if len(ingesters) <= 0 { + return nil, errTransferNoPendingIngesters + } + + return &ingesters[0], nil +} + +// unshippedBlocks returns a ulid list of blocks that haven't been shipped +func unshippedBlocks(dir string) (map[string][]string, error) { + userIDs, err := ioutil.ReadDir(dir) + if err != nil { + return nil, errors.Wrap(err, "unable to list the directory containing TSDB blocks") + } + + blocks := make(map[string][]string, len(userIDs)) + for _, user := range userIDs { + userID := user.Name() + userDir := filepath.Join(dir, userID) + + // Ensure the user dir is actually a directory. There may be spurious files + // in the storage, especially when using Minio in the local development environment. + if stat, err := os.Stat(userDir); err == nil && !stat.IsDir() { + level.Warn(util.Logger).Log("msg", "skipping entry while transferring TSDB blocks because not a directory", "path", userDir) + continue + } + + // Seed the map with the userID to ensure we transfer the WAL, even if all blocks are shipped. + blocks[userID] = []string{} + + blockIDs, err := ioutil.ReadDir(userDir) + if err != nil { + return nil, err + } + + m, err := shipper.ReadMetaFile(userDir) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + // If the meta file doesn't exit, it means the first sync for this + // user didn't occur yet, so we're going to consider all blocks unshipped. 
+ m = &shipper.Meta{} + } + + shipped := make(map[string]bool) + for _, u := range m.Uploaded { + shipped[u.String()] = true + } + + for _, blockID := range blockIDs { + _, err := ulid.Parse(blockID.Name()) + if err != nil { + continue + } + + if _, ok := shipped[blockID.Name()]; !ok { + blocks[userID] = append(blocks[userID], blockID.Name()) + } + } + } + + return blocks, nil +} + +func (i *Ingester) transferUser(ctx context.Context, stream client.Ingester_TransferTSDBClient, dir, ingesterID, userID string, blocks []string) { + level.Info(util.Logger).Log("msg", "transferring user blocks", "user", userID) + // Transfer all blocks + for _, blk := range blocks { + err := filepath.Walk(filepath.Join(dir, userID, blk), func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + + if info.IsDir() { + return nil + } + + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + p, err := filepath.Rel(dir, path) + if err != nil { + return err + } + + if err := batchSend(1024*1024, b, stream, &client.TimeSeriesFile{ + FromIngesterId: ingesterID, + UserId: userID, + Filename: p, + }, i.metrics.sentBytes); err != nil { + return err + } + + i.metrics.sentFiles.Add(1) + return nil + }) + if err != nil { + level.Warn(util.Logger).Log("msg", "failed to transfer all user blocks", "err", err) + } + } + + // Transfer WAL + level.Info(util.Logger).Log("msg", "transferring user WAL", "user", userID) + err := filepath.Walk(filepath.Join(dir, userID, "wal"), func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + + if info.IsDir() { + return nil + } + + b, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + p, err := filepath.Rel(dir, path) + if err != nil { + return err + } + + if err := batchSend(1024*1024, b, stream, &client.TimeSeriesFile{ + FromIngesterId: ingesterID, + UserId: userID, + Filename: p, + }, i.metrics.sentBytes); err != nil { + return err + } + + i.metrics.sentFiles.Add(1) + return nil + }) + + if err != nil { + level.Warn(util.Logger).Log("msg", "failed to transfer user WAL", "err", err) + } + + level.Info(util.Logger).Log("msg", "user blocks and WAL transfer completed", "user", userID) +} + +func batchSend(batch int, b []byte, stream client.Ingester_TransferTSDBClient, tsfile *client.TimeSeriesFile, sentBytes prometheus.Counter) error { + // Split file into smaller blocks for xfer + i := 0 + for ; i+batch < len(b); i += batch { + tsfile.Data = b[i : i+batch] + err := client.SendTimeSeriesFile(stream, tsfile) + if err != nil { + return err + } + sentBytes.Add(float64(len(tsfile.Data))) + } + + // Send final data + if i < len(b) { + tsfile.Data = b[i:] + err := client.SendTimeSeriesFile(stream, tsfile) + if err != nil { + return err + } + sentBytes.Add(float64(len(tsfile.Data))) + } + + return nil +} + +func removeEmptyDir(dir string) error { + if _, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return os.Remove(dir) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go new file mode 100644 index 000000000000..7e1029fe62b2 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go @@ -0,0 +1,349 @@ +package ingester + +import ( + "context" + "fmt" + "net/http" + "sync" + + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + 
"github.com/prometheus/prometheus/pkg/labels" + "github.com/segmentio/fasthash/fnv1a" + "github.com/weaveworks/common/httpgrpc" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/ingester/index" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/extract" + "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/validation" +) + +// userStates holds the userState object for all users (tenants), +// each one containing all the in-memory series for a given user. +type userStates struct { + states sync.Map + limiter *Limiter + cfg Config + metrics *ingesterMetrics +} + +type userState struct { + limiter *Limiter + userID string + fpLocker *fingerprintLocker + fpToSeries *seriesMap + mapper *fpMapper + index *index.InvertedIndex + ingestedAPISamples *ewmaRate + ingestedRuleSamples *ewmaRate + + seriesInMetric []metricCounterShard + + memSeries prometheus.Gauge + memSeriesCreatedTotal prometheus.Counter + memSeriesRemovedTotal prometheus.Counter + discardedSamples *prometheus.CounterVec + createdChunks prometheus.Counter +} + +const metricCounterShards = 128 + +// DiscardedSamples metric labels +const ( + perUserSeriesLimit = "per_user_series_limit" + perMetricSeriesLimit = "per_metric_series_limit" +) + +type metricCounterShard struct { + mtx sync.Mutex + m map[string]int +} + +func newUserStates(limiter *Limiter, cfg Config, metrics *ingesterMetrics) *userStates { + return &userStates{ + limiter: limiter, + cfg: cfg, + metrics: metrics, + } +} + +func (us *userStates) cp() map[string]*userState { + states := map[string]*userState{} + us.states.Range(func(key, value interface{}) bool { + states[key.(string)] = value.(*userState) + return true + }) + return states +} + +//nolint:unused +func (us *userStates) gc() { + us.states.Range(func(key, value interface{}) bool { + state := value.(*userState) + if state.fpToSeries.length() == 0 { + us.states.Delete(key) + } + return true + }) +} + +func (us *userStates) updateRates() { + us.states.Range(func(key, value interface{}) bool { + state := value.(*userState) + state.ingestedAPISamples.tick() + state.ingestedRuleSamples.tick() + return true + }) +} + +func (us *userStates) get(userID string) (*userState, bool) { + state, ok := us.states.Load(userID) + if !ok { + return nil, ok + } + return state.(*userState), ok +} + +func (us *userStates) getOrCreate(userID string) *userState { + state, ok := us.get(userID) + if !ok { + + seriesInMetric := make([]metricCounterShard, 0, metricCounterShards) + for i := 0; i < metricCounterShards; i++ { + seriesInMetric = append(seriesInMetric, metricCounterShard{ + m: map[string]int{}, + }) + } + + // Speculatively create a userState object and try to store it + // in the map. 
Another goroutine may have got there before + // us, in which case this userState will be discarded + state = &userState{ + userID: userID, + limiter: us.limiter, + fpToSeries: newSeriesMap(), + fpLocker: newFingerprintLocker(16 * 1024), + index: index.New(), + ingestedAPISamples: newEWMARate(0.2, us.cfg.RateUpdatePeriod), + ingestedRuleSamples: newEWMARate(0.2, us.cfg.RateUpdatePeriod), + seriesInMetric: seriesInMetric, + + memSeries: us.metrics.memSeries, + memSeriesCreatedTotal: us.metrics.memSeriesCreatedTotal.WithLabelValues(userID), + memSeriesRemovedTotal: us.metrics.memSeriesRemovedTotal.WithLabelValues(userID), + discardedSamples: validation.DiscardedSamples.MustCurryWith(prometheus.Labels{"user": userID}), + createdChunks: us.metrics.createdChunks, + } + state.mapper = newFPMapper(state.fpToSeries) + stored, ok := us.states.LoadOrStore(userID, state) + if !ok { + us.metrics.memUsers.Inc() + } + state = stored.(*userState) + } + + return state +} + +// teardown ensures metrics are accurately updated if a userStates struct is discarded +func (us *userStates) teardown() { + for _, u := range us.cp() { + u.memSeriesRemovedTotal.Add(float64(u.fpToSeries.length())) + u.memSeries.Sub(float64(u.fpToSeries.length())) + us.metrics.memUsers.Dec() + } +} + +func (us *userStates) getViaContext(ctx context.Context) (*userState, bool, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, false, fmt.Errorf("no user id") + } + state, ok := us.get(userID) + return state, ok, nil +} + +// NOTE: memory for `labels` is unsafe; anything retained beyond the +// life of this function must be copied +func (us *userStates) getOrCreateSeries(ctx context.Context, userID string, labels []client.LabelAdapter, record *Record) (*userState, model.Fingerprint, *memorySeries, error) { + state := us.getOrCreate(userID) + // WARNING: `err` may have a reference to unsafe memory in `labels` + fp, series, err := state.getSeries(labels, record) + return state, fp, series, err +} + +// NOTE: memory for `metric` is unsafe; anything retained beyond the +// life of this function must be copied +func (u *userState) getSeries(metric labelPairs, record *Record) (model.Fingerprint, *memorySeries, error) { + rawFP := client.FastFingerprint(metric) + u.fpLocker.Lock(rawFP) + fp := u.mapper.mapFP(rawFP, metric) + if fp != rawFP { + u.fpLocker.Unlock(rawFP) + u.fpLocker.Lock(fp) + } + + series, ok := u.fpToSeries.get(fp) + if ok { + return fp, series, nil + } + + series, err := u.createSeriesWithFingerprint(fp, metric, record, false) + if err != nil { + u.fpLocker.Unlock(fp) + return 0, nil, err + } + + return fp, series, nil +} + +func (u *userState) createSeriesWithFingerprint(fp model.Fingerprint, metric labelPairs, record *Record, recovery bool) (*memorySeries, error) { + // There's theoretically a relatively harmless race here if multiple + // goroutines get the length of the series map at the same time, then + // all proceed to add a new series. This is likely not worth addressing, + // as this should happen rarely (all samples from one push are added + // serially), and the overshoot in allowed series would be minimal. 
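+
+	// Aside (illustrative, not part of this change): getOrCreate above relies
+	// on the standard sync.Map idiom of building the value speculatively and
+	// keeping whichever instance won the race, roughly:
+	//
+	//	candidate := &userState{userID: userID /* ... */}
+	//	stored, loaded := us.states.LoadOrStore(userID, candidate)
+	//	state := stored.(*userState)
+	//	if !loaded {
+	//		us.metrics.memUsers.Inc() // our candidate won; account for the new user
+	//	}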
+
+	if !recovery {
+		if err := u.limiter.AssertMaxSeriesPerUser(u.userID, u.fpToSeries.length()); err != nil {
+			return nil, makeLimitError(perUserSeriesLimit, err)
+		}
+	}
+
+	// MetricNameFromLabelAdapters returns a copy of the string in `metric`
+	metricName, err := extract.MetricNameFromLabelAdapters(metric)
+	if err != nil {
+		return nil, err
+	}
+
+	if !recovery {
+		// Check if the per-metric limit has been exceeded
+		if err = u.canAddSeriesFor(string(metricName)); err != nil {
+			// WARNING: returns a reference to `metric`
+			return nil, makeMetricLimitError(perMetricSeriesLimit, client.FromLabelAdaptersToLabels(metric), err)
+		}
+	}
+
+	u.memSeriesCreatedTotal.Inc()
+	u.memSeries.Inc()
+
+	if record != nil {
+		record.Labels = append(record.Labels, Labels{
+			Fingerprint: uint64(fp),
+			Labels:      metric,
+		})
+	}
+
+	labels := u.index.Add(metric, fp) // Add() returns 'interned' values so the original labels are not retained
+	series := newMemorySeries(labels, u.createdChunks)
+	u.fpToSeries.put(fp, series)
+
+	return series, nil
+}
+
+func (u *userState) canAddSeriesFor(metric string) error {
+	shard := &u.seriesInMetric[util.HashFP(model.Fingerprint(fnv1a.HashString64(string(metric))))%metricCounterShards]
+	shard.mtx.Lock()
+	defer shard.mtx.Unlock()
+
+	err := u.limiter.AssertMaxSeriesPerMetric(u.userID, shard.m[metric])
+	if err != nil {
+		return err
+	}
+
+	shard.m[metric]++
+	return nil
+}
+
+func (u *userState) removeSeries(fp model.Fingerprint, metric labels.Labels) {
+	u.fpToSeries.del(fp)
+	u.index.Delete(labels.Labels(metric), fp)
+
+	metricName := metric.Get(model.MetricNameLabel)
+	if metricName == "" {
+		// Series without a metric name should never be able to make it into
+		// the ingester's memory storage.
+		panic("No metric name label")
+	}
+
+	shard := &u.seriesInMetric[util.HashFP(model.Fingerprint(fnv1a.HashString64(string(metricName))))%metricCounterShards]
+	shard.mtx.Lock()
+	defer shard.mtx.Unlock()
+
+	shard.m[metricName]--
+	if shard.m[metricName] == 0 {
+		delete(shard.m, metricName)
+	}
+
+	u.memSeriesRemovedTotal.Inc()
+	u.memSeries.Dec()
+}
+
+// forSeriesMatching passes all series matching the given matchers to the
+// provided callback. Deals with locking and the quirks of zero-length matcher
+// values. There are 2 callbacks:
+// - The `add` callback is called for each series while the lock is held, and
+//   is intended to be used by the caller to build a batch.
+// - The `send` callback is called at certain intervals specified by batchSize
+//   with no locks held, and is intended to be used by the caller to send the
+//   built batches.
+func (u *userState) forSeriesMatching(ctx context.Context, allMatchers []*labels.Matcher,
+	add func(context.Context, model.Fingerprint, *memorySeries) error,
+	send func(context.Context) error, batchSize int,
+) error {
+	log, ctx := spanlogger.New(ctx, "forSeriesMatching")
+	defer log.Finish()
+
+	filters, matchers := util.SplitFiltersAndMatchers(allMatchers)
+	fps := u.index.Lookup(matchers)
+	if len(fps) > u.limiter.MaxSeriesPerQuery(u.userID) {
+		return httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "exceeded maximum number of series in a query")
+	}
+
+	level.Debug(log).Log("series", len(fps))
+
+	// We only hold one FP lock at once here, so no opportunity to deadlock.
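+
+	// Aside (illustrative, not part of this change): a hypothetical caller
+	// builds a batch under the per-series lock and flushes it with no locks
+	// held, e.g.:
+	//
+	//	var batch []*memorySeries
+	//	err := state.forSeriesMatching(ctx, matchers,
+	//		func(_ context.Context, _ model.Fingerprint, s *memorySeries) error {
+	//			batch = append(batch, s) // `add`: called with the FP lock held
+	//			return nil
+	//		},
+	//		func(_ context.Context) error {
+	//			batch = batch[:0] // `send`: e.g. stream the batch, then reset it
+	//			return nil
+	//		},
+	//		128)
+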
+	sent := 0
+outer:
+	for _, fp := range fps {
+		if err := ctx.Err(); err != nil {
+			return err
+		}
+
+		u.fpLocker.Lock(fp)
+		series, ok := u.fpToSeries.get(fp)
+		if !ok {
+			u.fpLocker.Unlock(fp)
+			continue
+		}
+
+		for _, filter := range filters {
+			if !filter.Matches(series.metric.Get(filter.Name)) {
+				u.fpLocker.Unlock(fp)
+				continue outer
+			}
+		}
+
+		err := add(ctx, fp, series)
+		u.fpLocker.Unlock(fp)
+		if err != nil {
+			return err
+		}
+
+		sent++
+		if batchSize > 0 && sent%batchSize == 0 && send != nil {
+			if err = send(ctx); err != nil {
+				return err
+			}
+		}
+	}
+
+	if batchSize > 0 && sent%batchSize > 0 && send != nil {
+		return send(ctx)
+	}
+	return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go
new file mode 100644
index 000000000000..398a42388ba6
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go
@@ -0,0 +1,938 @@
+package ingester
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/golang/protobuf/proto"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prometheus/common/model"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"github.com/prometheus/prometheus/tsdb/wal"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// WALConfig is config for the Write Ahead Log.
+type WALConfig struct {
+	WALEnabled         bool          `yaml:"wal_enabled"`
+	CheckpointEnabled  bool          `yaml:"checkpoint_enabled"`
+	Recover            bool          `yaml:"recover_from_wal"`
+	Dir                string        `yaml:"wal_dir"`
+	CheckpointDuration time.Duration `yaml:"checkpoint_duration"`
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (cfg *WALConfig) RegisterFlags(f *flag.FlagSet) {
+	f.StringVar(&cfg.Dir, "ingester.wal-dir", "wal", "Directory to store the WAL and/or recover from WAL.")
+	f.BoolVar(&cfg.Recover, "ingester.recover-from-wal", false, "Recover data from existing WAL irrespective of WAL enabled/disabled.")
+	f.BoolVar(&cfg.WALEnabled, "ingester.wal-enabled", false, "Enable writing of ingested data into WAL.")
+	f.BoolVar(&cfg.CheckpointEnabled, "ingester.checkpoint-enabled", true, "Enable checkpointing of in-memory chunks. It should always be true in normal use; set it to false only for small tests, as there is no mechanism yet to delete the old WAL if checkpointing is disabled.")
+	f.DurationVar(&cfg.CheckpointDuration, "ingester.checkpoint-duration", 30*time.Minute, "Interval at which checkpoints should be created.")
+}
+
+// WAL interface allows us to have a no-op WAL when the WAL is disabled.
+type WAL interface {
+	// Log marshals the record and writes it into the WAL.
+	Log(*Record) error
+	// Stop stops all the WAL operations.
+	Stop()
+}
+
+type noopWAL struct{}
+
+func (noopWAL) Log(*Record) error { return nil }
+func (noopWAL) Stop()             {}
+
+type walWrapper struct {
+	cfg  WALConfig
+	quit chan struct{}
+	wait sync.WaitGroup
+
+	wal           *wal.WAL
+	getUserStates func() map[string]*userState
+	checkpointMtx sync.Mutex
+
+	// Checkpoint metrics.
+ checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter + checkpointDuration prometheus.Summary +} + +// newWAL creates a WAL object. If the WAL is disabled, then the returned WAL is a no-op WAL. +func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, registerer prometheus.Registerer) (WAL, error) { + if !cfg.WALEnabled { + return &noopWAL{}, nil + } + + util.WarnExperimentalUse("Chunks WAL") + + var walRegistry prometheus.Registerer + if registerer != nil { + walRegistry = prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, registerer) + } + tsdbWAL, err := wal.NewSize(util.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, true) + if err != nil { + return nil, err + } + + w := &walWrapper{ + cfg: cfg, + quit: make(chan struct{}), + wal: tsdbWAL, + getUserStates: userStatesFunc, + } + + w.checkpointDeleteFail = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_checkpoint_deletions_failed_total", + Help: "Total number of checkpoint deletions that failed.", + }) + w.checkpointDeleteTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_checkpoint_deletions_total", + Help: "Total number of checkpoint deletions attempted.", + }) + w.checkpointCreationFail = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_checkpoint_creations_failed_total", + Help: "Total number of checkpoint creations that failed.", + }) + w.checkpointCreationTotal = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_ingester_checkpoint_creations_total", + Help: "Total number of checkpoint creations attempted.", + }) + w.checkpointDuration = promauto.With(registerer).NewSummary(prometheus.SummaryOpts{ + Name: "cortex_ingester_checkpoint_duration_seconds", + Help: "Time taken to create a checkpoint.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + w.wait.Add(1) + go w.run() + return w, nil +} + +func (w *walWrapper) Stop() { + close(w.quit) + w.wait.Wait() + w.wal.Close() +} + +func (w *walWrapper) Log(record *Record) error { + select { + case <-w.quit: + return nil + default: + if record == nil { + return nil + } + buf, err := proto.Marshal(record) + if err != nil { + return err + } + return w.wal.Log(buf) + } +} + +func (w *walWrapper) run() { + defer w.wait.Done() + + if !w.cfg.CheckpointEnabled { + return + } + + ticker := time.NewTicker(w.cfg.CheckpointDuration) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + start := time.Now() + level.Info(util.Logger).Log("msg", "starting checkpoint") + if err := w.performCheckpoint(false); err != nil { + level.Error(util.Logger).Log("msg", "error checkpointing series", "err", err) + continue + } + elapsed := time.Since(start) + level.Info(util.Logger).Log("msg", "checkpoint done", "time", elapsed.String()) + w.checkpointDuration.Observe(elapsed.Seconds()) + case <-w.quit: + level.Info(util.Logger).Log("msg", "creating checkpoint before shutdown") + if err := w.performCheckpoint(true); err != nil { + level.Error(util.Logger).Log("msg", "error checkpointing series during shutdown", "err", err) + } + return + } + } +} + +const checkpointPrefix = "checkpoint." 
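+
+// exampleWALSetup is illustrative only and not part of this change: a hedged
+// sketch of how the pieces above are assumed to be wired together by the
+// owning ingester. The `states` argument stands in for the ingester's user
+// states; the function name and tenant ID are hypothetical.
+func exampleWALSetup(cfg WALConfig, states *userStates) (WAL, error) {
+	// newWAL returns a no-op WAL when cfg.WALEnabled is false.
+	w, err := newWAL(cfg, states.cp, prometheus.DefaultRegisterer)
+	if err != nil {
+		return nil, err
+	}
+	// Each push would then be logged before being applied to in-memory state.
+	if err := w.Log(&Record{UserId: "example-tenant"}); err != nil {
+		w.Stop()
+		return nil, err
+	}
+	return w, nil
+}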
+ +func (w *walWrapper) performCheckpoint(immediate bool) (err error) { + if !w.cfg.CheckpointEnabled { + return nil + } + + // This method is called during shutdown which can interfere with ongoing checkpointing. + // Hence to avoid any race between file creation and WAL truncation, we hold this lock here. + w.checkpointMtx.Lock() + defer w.checkpointMtx.Unlock() + + w.checkpointCreationTotal.Inc() + defer func() { + if err != nil { + w.checkpointCreationFail.Inc() + } + }() + + if w.getUserStates == nil { + return errors.New("function to get user states not initialised") + } + + _, lastSegment, err := w.wal.Segments() + if err != nil { + return err + } + if lastSegment < 0 { + // There are no WAL segments. No need of checkpoint yet. + return nil + } + + _, lastCh, err := lastCheckpoint(w.wal.Dir()) + if err != nil { + return err + } + + if lastCh == lastSegment { + // As the checkpoint name is taken from last WAL segment, we need to ensure + // a new segment for every checkpoint so that the old checkpoint is not overwritten. + if err := w.wal.NextSegment(); err != nil { + return err + } + + _, lastSegment, err = w.wal.Segments() + if err != nil { + return err + } + } + + // Checkpoint is named after the last WAL segment present so that when replaying the WAL + // we can start from that particular WAL segment. + checkpointDir := filepath.Join(w.wal.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment)) + level.Info(util.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) + checkpointDirTemp := checkpointDir + ".tmp" + + if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil { + return errors.Wrap(err, "create checkpoint dir") + } + checkpoint, err := wal.New(nil, nil, checkpointDirTemp, true) + if err != nil { + return errors.Wrap(err, "open checkpoint") + } + defer func() { + checkpoint.Close() + os.RemoveAll(checkpointDirTemp) + }() + + // Count number of series - we'll use this to rate limit checkpoints. + numSeries := 0 + us := w.getUserStates() + for _, state := range us { + numSeries += state.fpToSeries.length() + } + if numSeries == 0 { + return nil + } + + var ticker *time.Ticker + if !immediate { + perSeriesDuration := w.cfg.CheckpointDuration / time.Duration(numSeries) + ticker = time.NewTicker(perSeriesDuration) + defer ticker.Stop() + } + + var wireChunkBuf []client.Chunk + for userID, state := range us { + for pair := range state.fpToSeries.iter() { + state.fpLocker.Lock(pair.fp) + wireChunkBuf, err = w.checkpointSeries(checkpoint, userID, pair.fp, pair.series, wireChunkBuf) + state.fpLocker.Unlock(pair.fp) + if err != nil { + return err + } + + if !immediate { + select { + case <-ticker.C: + case <-w.quit: // When we're trying to shutdown, finish the checkpoint as fast as possible. + } + } + } + } + + if err := checkpoint.Close(); err != nil { + return errors.Wrap(err, "close checkpoint") + } + if err := fileutil.Replace(checkpointDirTemp, checkpointDir); err != nil { + return errors.Wrap(err, "rename checkpoint directory") + } + + // We delete the WAL segments which are before the previous checkpoint and not before the + // current checkpoint created. This is because if the latest checkpoint is corrupted for any reason, we + // should be able to recover from the older checkpoint which would need the older WAL segments. + if err := w.wal.Truncate(lastCh); err != nil { + // It is fine to have old WAL segments hanging around if deletion failed. + // We can try again next time. 
+ level.Error(util.Logger).Log("msg", "error deleting old WAL segments", "err", err) + } + + if lastCh >= 0 { + if err := w.deleteCheckpoints(lastCh); err != nil { + // It is fine to have old checkpoints hanging around if deletion failed. + // We can try again next time. + level.Error(util.Logger).Log("msg", "error deleting old checkpoint", "err", err) + } + } + + return nil +} + +// lastCheckpoint returns the directory name and index of the most recent checkpoint. +// If dir does not contain any checkpoints, -1 is returned as index. +func lastCheckpoint(dir string) (string, int, error) { + dirs, err := ioutil.ReadDir(dir) + if err != nil { + return "", -1, err + } + var ( + maxIdx = -1 + checkpointDir string + ) + // There may be multiple checkpoints left, so select the one with max index. + for i := 0; i < len(dirs); i++ { + di := dirs[i] + + if !strings.HasPrefix(di.Name(), checkpointPrefix) { + continue + } + if !di.IsDir() { + return "", -1, fmt.Errorf("checkpoint %s is not a directory", di.Name()) + } + idx, err := strconv.Atoi(di.Name()[len(checkpointPrefix):]) + if err != nil { + continue + } + if idx > maxIdx { + checkpointDir = di.Name() + maxIdx = idx + } + } + if maxIdx >= 0 { + return filepath.Join(dir, checkpointDir), maxIdx, nil + } + return "", -1, nil +} + +// deleteCheckpoints deletes all checkpoints in a directory which is < maxIndex. +func (w *walWrapper) deleteCheckpoints(maxIndex int) (err error) { + w.checkpointDeleteTotal.Inc() + defer func() { + if err != nil { + w.checkpointDeleteFail.Inc() + } + }() + + var errs tsdb_errors.MultiError + + files, err := ioutil.ReadDir(w.wal.Dir()) + if err != nil { + return err + } + for _, fi := range files { + if !strings.HasPrefix(fi.Name(), checkpointPrefix) { + continue + } + index, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) + if err != nil || index >= maxIndex { + continue + } + if err := os.RemoveAll(filepath.Join(w.wal.Dir(), fi.Name())); err != nil { + errs.Add(err) + } + } + return errs.Err() +} + +// checkpointSeries write the chunks of the series to the checkpoint. 
+func (w *walWrapper) checkpointSeries(cp *wal.WAL, userID string, fp model.Fingerprint, series *memorySeries, wireChunks []client.Chunk) ([]client.Chunk, error) { + var err error + wireChunks, err = toWireChunks(series.chunkDescs, wireChunks[:0]) + if err != nil { + return wireChunks, err + } + + buf, err := proto.Marshal(&Series{ + UserId: userID, + Fingerprint: uint64(fp), + Labels: client.FromLabelsToLabelAdapters(series.metric), + Chunks: wireChunks, + }) + if err != nil { + return wireChunks, err + } + + return wireChunks, cp.Log(buf) +} + +type walRecoveryParameters struct { + walDir string + ingester *Ingester + numWorkers int + stateCache []map[string]*userState + seriesCache []map[string]map[uint64]*memorySeries +} + +func recoverFromWAL(ingester *Ingester) error { + params := walRecoveryParameters{ + walDir: ingester.cfg.WALConfig.Dir, + numWorkers: runtime.GOMAXPROCS(0), + ingester: ingester, + } + + params.stateCache = make([]map[string]*userState, params.numWorkers) + params.seriesCache = make([]map[string]map[uint64]*memorySeries, params.numWorkers) + for i := 0; i < params.numWorkers; i++ { + params.stateCache[i] = make(map[string]*userState) + params.seriesCache[i] = make(map[string]map[uint64]*memorySeries) + } + + level.Info(util.Logger).Log("msg", "recovering from checkpoint") + start := time.Now() + userStates, idx, err := processCheckpointWithRepair(params) + if err != nil { + return err + } + elapsed := time.Since(start) + level.Info(util.Logger).Log("msg", "recovered from checkpoint", "time", elapsed.String()) + + if segExists, err := segmentsExist(params.walDir); err == nil && !segExists { + level.Info(util.Logger).Log("msg", "no segments found, skipping recover from segments") + ingester.userStatesMtx.Lock() + ingester.userStates = userStates + ingester.userStatesMtx.Unlock() + return nil + } else if err != nil { + return err + } + + level.Info(util.Logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx) + start = time.Now() + if err := processWALWithRepair(idx, userStates, params); err != nil { + return err + } + elapsed = time.Since(start) + level.Info(util.Logger).Log("msg", "recovered from WAL", "time", elapsed.String()) + + ingester.userStatesMtx.Lock() + ingester.userStates = userStates + ingester.userStatesMtx.Unlock() + return nil +} + +func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int, error) { + + // Use a local userStates, so we don't need to worry about locking. + userStates := newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics) + + lastCheckpointDir, idx, err := lastCheckpoint(params.walDir) + if err != nil { + return nil, -1, err + } + if idx < 0 { + level.Info(util.Logger).Log("msg", "no checkpoint found") + return userStates, -1, nil + } + + level.Info(util.Logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir)) + + err = processCheckpoint(lastCheckpointDir, userStates, params) + if err == nil { + return userStates, idx, nil + } + + // We don't call repair on checkpoint as losing even a single record is like losing the entire data of a series. + // We try recovering from the older checkpoint instead. + params.ingester.metrics.walCorruptionsTotal.Inc() + level.Error(util.Logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err) + + // Deleting this checkpoint to try the previous checkpoint. 
+	if err := os.RemoveAll(lastCheckpointDir); err != nil {
+		return nil, -1, errors.Wrapf(err, "unable to delete checkpoint directory %s", lastCheckpointDir)
+	}
+
+	// If we have reached this point, it means the last checkpoint was deleted.
+	// Now the last checkpoint will be the one before the deleted checkpoint.
+	lastCheckpointDir, idx, err = lastCheckpoint(params.walDir)
+	if err != nil {
+		return nil, -1, err
+	}
+
+	// Creating new userStates to discard the old chunks.
+	userStates = newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics)
+	if idx < 0 {
+		// There was only 1 checkpoint. We don't error in this case
+		// as for the first checkpoint the entire WAL will/should be present.
+		return userStates, -1, nil
+	}
+
+	level.Info(util.Logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir))
+	if err := processCheckpoint(lastCheckpointDir, userStates, params); err != nil {
+		// We won't attempt the repair again, even if it's the old checkpoint.
+		params.ingester.metrics.walCorruptionsTotal.Inc()
+		return nil, -1, err
+	}
+
+	return userStates, idx, nil
+}
+
+// segmentsExist is a stripped down version of
+// https://github.com/prometheus/prometheus/blob/4c648eddf47d7e07fbc74d0b18244402200dca9e/tsdb/wal/wal.go#L739-L760.
+func segmentsExist(dir string) (bool, error) {
+	files, err := fileutil.ReadDir(dir)
+	if err != nil {
+		return false, err
+	}
+	for _, fn := range files {
+		if _, err := strconv.Atoi(fn); err == nil {
+			// First filename which is a number.
+			// This is how Prometheus stores segments, and this
+			// is how it checks too.
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// processCheckpoint loads the chunks of the series present in the last checkpoint.
+func processCheckpoint(name string, userStates *userStates, params walRecoveryParameters) error {
+
+	reader, closer, err := newWalReader(name, -1)
+	if err != nil {
+		return err
+	}
+	defer closer.Close()
+
+	var (
+		inputs = make([]chan *Series, params.numWorkers)
+		// errChan is to capture the errors from goroutine.
+		// The channel size is nWorkers to not block any worker if all of them error out.
+		errChan = make(chan error, params.numWorkers)
+		wg      = sync.WaitGroup{}
+		seriesPool = &sync.Pool{
+			New: func() interface{} {
+				return &Series{}
+			},
+		}
+	)
+
+	wg.Add(params.numWorkers)
+	for i := 0; i < params.numWorkers; i++ {
+		inputs[i] = make(chan *Series, 300)
+		go func(input <-chan *Series, stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries) {
+			processCheckpointRecord(userStates, seriesPool, stateCache, seriesCache, input, errChan, params.ingester.metrics.memoryChunks)
+			wg.Done()
+		}(inputs[i], params.stateCache[i], params.seriesCache[i])
+	}
+
+	var capturedErr error
+Loop:
+	for reader.Next() {
+		s := seriesPool.Get().(*Series)
+		if err := proto.Unmarshal(reader.Record(), s); err != nil {
+			// We don't return here in order to close/drain all the channels and
+			// make sure all goroutines exit.
+			capturedErr = err
+			break Loop
+		}
+		// The yoloString from the unmarshal of LabelAdapter gets corrupted
+		// when travelling through the channel. Hence we make a copy of it.
+		// This extra alloc during the read path is fine, as it happens only once
+		// and saves extra allocs during the write path by keeping LabelAdapter.
+		s.Labels = copyLabelAdapters(s.Labels)
+
+		select {
+		case capturedErr = <-errChan:
+			// Exit early on an error.
+			// Only acts upon the first error received.
+ break Loop + default: + mod := s.Fingerprint % uint64(params.numWorkers) + inputs[mod] <- s + } + } + + for i := 0; i < params.numWorkers; i++ { + close(inputs[i]) + } + wg.Wait() + // If any worker errored out, some input channels might not be empty. + // Hence drain them. + for i := 0; i < params.numWorkers; i++ { + for range inputs[i] { + } + } + + if capturedErr != nil { + return capturedErr + } + select { + case capturedErr = <-errChan: + return capturedErr + default: + return reader.Err() + } +} + +func copyLabelAdapters(las []client.LabelAdapter) []client.LabelAdapter { + for i := range las { + n, v := make([]byte, len(las[i].Name)), make([]byte, len(las[i].Value)) + copy(n, las[i].Name) + copy(v, las[i].Value) + las[i].Name = string(n) + las[i].Value = string(v) + } + return las +} + +func processCheckpointRecord( + userStates *userStates, + seriesPool *sync.Pool, + stateCache map[string]*userState, + seriesCache map[string]map[uint64]*memorySeries, + seriesChan <-chan *Series, + errChan chan error, + memoryChunks prometheus.Counter, +) { + var la []client.LabelAdapter + for s := range seriesChan { + state, ok := stateCache[s.UserId] + if !ok { + state = userStates.getOrCreate(s.UserId) + stateCache[s.UserId] = state + seriesCache[s.UserId] = make(map[uint64]*memorySeries) + } + + la = la[:0] + for _, l := range s.Labels { + la = append(la, client.LabelAdapter{ + Name: string(l.Name), + Value: string(l.Value), + }) + } + series, err := state.createSeriesWithFingerprint(model.Fingerprint(s.Fingerprint), la, nil, true) + if err != nil { + errChan <- err + return + } + + descs, err := fromWireChunks(s.Chunks) + if err != nil { + errChan <- err + return + } + + if err := series.setChunks(descs); err != nil { + errChan <- err + return + } + memoryChunks.Add(float64(len(descs))) + + seriesCache[s.UserId][s.Fingerprint] = series + seriesPool.Put(s) + } +} + +type samplesWithUserID struct { + samples []Sample + userID string +} + +func processWALWithRepair(startSegment int, userStates *userStates, params walRecoveryParameters) error { + + corruptErr := processWAL(startSegment, userStates, params) + if corruptErr == nil { + return nil + } + + params.ingester.metrics.walCorruptionsTotal.Inc() + level.Error(util.Logger).Log("msg", "error in replaying from WAL", "err", corruptErr) + + // Attempt repair. + level.Info(util.Logger).Log("msg", "attempting repair of the WAL") + w, err := wal.New(util.Logger, nil, params.walDir, true) + if err != nil { + return err + } + + err = w.Repair(corruptErr) + if err != nil { + level.Error(util.Logger).Log("msg", "error in repairing WAL", "err", err) + } + var multiErr tsdb_errors.MultiError + multiErr.Add(err) + multiErr.Add(w.Close()) + + return multiErr.Err() +} + +// processWAL processes the records in the WAL concurrently. +func processWAL(startSegment int, userStates *userStates, params walRecoveryParameters) error { + + reader, closer, err := newWalReader(params.walDir, startSegment) + if err != nil { + return err + } + defer closer.Close() + + var ( + wg sync.WaitGroup + inputs = make([]chan *samplesWithUserID, params.numWorkers) + outputs = make([]chan *samplesWithUserID, params.numWorkers) + // errChan is to capture the errors from goroutine. + // The channel size is nWorkers to not block any worker if all of them error out. 
+ errChan = make(chan error, params.numWorkers) + shards = make([]*samplesWithUserID, params.numWorkers) + ) + + wg.Add(params.numWorkers) + for i := 0; i < params.numWorkers; i++ { + outputs[i] = make(chan *samplesWithUserID, 300) + inputs[i] = make(chan *samplesWithUserID, 300) + shards[i] = &samplesWithUserID{} + + go func(input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, + stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries) { + processWALSamples(userStates, stateCache, seriesCache, input, output, errChan) + wg.Done() + }(inputs[i], outputs[i], params.stateCache[i], params.seriesCache[i]) + } + + var ( + capturedErr error + record = &Record{} + ) +Loop: + for reader.Next() { + select { + case capturedErr = <-errChan: + // Exit early on an error. + // Only acts upon the first error received. + break Loop + default: + } + if err := proto.Unmarshal(reader.Record(), record); err != nil { + // We don't return here in order to close/drain all the channels and + // make sure all goroutines exit. + capturedErr = err + break Loop + } + + if len(record.Labels) > 0 { + state := userStates.getOrCreate(record.UserId) + // Create the series from labels which do not exist. + for _, labels := range record.Labels { + _, ok := state.fpToSeries.get(model.Fingerprint(labels.Fingerprint)) + if ok { + continue + } + _, err := state.createSeriesWithFingerprint(model.Fingerprint(labels.Fingerprint), labels.Labels, nil, true) + if err != nil { + // We don't return here in order to close/drain all the channels and + // make sure all goroutines exit. + capturedErr = err + break Loop + } + } + } + + // We split up the samples into chunks of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(record.Samples) > 0 { + m := 5000 + if len(record.Samples) < m { + m = len(record.Samples) + } + for i := 0; i < params.numWorkers; i++ { + if len(shards[i].samples) == 0 { + // It is possible that the previous iteration did not put + // anything in this shard. In that case no need to get a new buffer. + shards[i].userID = record.UserId + continue + } + select { + case buf := <-outputs[i]: + buf.samples = buf.samples[:0] + buf.userID = record.UserId + shards[i] = buf + default: + shards[i] = &samplesWithUserID{ + userID: record.UserId, + } + } + } + for _, sam := range record.Samples[:m] { + mod := sam.Fingerprint % uint64(params.numWorkers) + shards[mod].samples = append(shards[mod].samples, sam) + } + for i := 0; i < params.numWorkers; i++ { + if len(shards[i].samples) > 0 { + inputs[i] <- shards[i] + } + } + record.Samples = record.Samples[m:] + } + } + + for i := 0; i < params.numWorkers; i++ { + close(inputs[i]) + for range outputs[i] { + } + } + wg.Wait() + // If any worker errored out, some input channels might not be empty. + // Hence drain them. 
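+
+	// Aside (illustrative, not part of this change): the select over
+	// `outputs[i]` above implements simple buffer recycling: a shard buffer the
+	// worker has finished with is pulled back and reused, and a fresh one is
+	// allocated only when none is available yet, roughly:
+	//
+	//	select {
+	//	case buf := <-outputs[i]: // worker handed back a spent buffer
+	//		buf.samples = buf.samples[:0]
+	//		shards[i] = buf
+	//	default: // none available; allocate
+	//		shards[i] = &samplesWithUserID{userID: record.UserId}
+	//	}
+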
+ for i := 0; i < params.numWorkers; i++ { + for range inputs[i] { + } + } + + if capturedErr != nil { + return capturedErr + } + select { + case capturedErr = <-errChan: + return capturedErr + default: + return reader.Err() + } +} + +func processWALSamples(userStates *userStates, stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries, + input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, errChan chan error) { + defer close(output) + + sp := model.SamplePair{} + for samples := range input { + state, ok := stateCache[samples.userID] + if !ok { + state = userStates.getOrCreate(samples.userID) + stateCache[samples.userID] = state + seriesCache[samples.userID] = make(map[uint64]*memorySeries) + } + sc := seriesCache[samples.userID] + for i := range samples.samples { + series, ok := sc[samples.samples[i].Fingerprint] + if !ok { + series, ok = state.fpToSeries.get(model.Fingerprint(samples.samples[i].Fingerprint)) + if !ok { + // This should ideally not happen. + // If the series was not created in recovering checkpoint or + // from the labels of any records previous to this, there + // is no way to get the labels for this fingerprint. + level.Warn(util.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Fingerprint).String()) + continue + } + } + + sp.Timestamp = model.Time(samples.samples[i].Timestamp) + sp.Value = model.SampleValue(samples.samples[i].Value) + // There can be many out of order samples because of checkpoint and WAL overlap. + // Checking this beforehand avoids the allocation of lots of error messages. + if sp.Timestamp.After(series.lastTime) { + if err := series.add(sp); err != nil { + errChan <- err + return + } + } + } + output <- samples + } +} + +// If startSegment is <0, it means all the segments. +func newWalReader(name string, startSegment int) (*wal.Reader, io.Closer, error) { + var ( + segmentReader io.ReadCloser + err error + ) + if startSegment < 0 { + segmentReader, err = wal.NewSegmentsReader(name) + if err != nil { + return nil, nil, err + } + } else { + first, last, err := SegmentRange(name) + if err != nil { + return nil, nil, err + } + if startSegment > last { + return nil, nil, errors.New("start segment is beyond the last WAL segment") + } + if first > startSegment { + startSegment = first + } + segmentReader, err = wal.NewSegmentsRangeReader(wal.SegmentRange{ + Dir: name, + First: startSegment, + Last: -1, // Till the end. + }) + if err != nil { + return nil, nil, err + } + } + return wal.NewReader(segmentReader), segmentReader, nil +} + +// SegmentRange returns the first and last segment index of the WAL in the dir. +// If https://github.com/prometheus/prometheus/pull/6477 is merged, get rid of this +// method and use from Prometheus directly. 
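+//
+// Aside (illustrative, not part of this change): callers are expected to treat
+// the (-1, -1) result as "no segments", e.g.:
+//
+//	first, last, err := SegmentRange(dir)
+//	if err != nil {
+//		return err
+//	}
+//	if first < 0 {
+//		return nil // no numbered segment files in dir
+//	}
+//	_ = last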
+func SegmentRange(dir string) (int, int, error) { + files, err := fileutil.ReadDir(dir) + if err != nil { + return 0, 0, err + } + first, last := math.MaxInt32, math.MinInt32 + for _, fn := range files { + k, err := strconv.Atoi(fn) + if err != nil { + continue + } + if k < first { + first = k + } + if k > last { + last = k + } + } + if first == math.MaxInt32 || last == math.MinInt32 { + return -1, -1, nil + } + return first, last, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go new file mode 100644 index 000000000000..31970266eda3 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.pb.go @@ -0,0 +1,1532 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: wal.proto + +package ingester + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + client "github.com/cortexproject/cortex/pkg/ingester/client" + github_com_cortexproject_cortex_pkg_ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Record struct { + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Labels []Labels `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,3,rep,name=samples,proto3" json:"samples"` +} + +func (m *Record) Reset() { *m = Record{} } +func (*Record) ProtoMessage() {} +func (*Record) Descriptor() ([]byte, []int) { + return fileDescriptor_ae6364fc8077884f, []int{0} +} +func (m *Record) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Record.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Record) XXX_Merge(src proto.Message) { + xxx_messageInfo_Record.Merge(m, src) +} +func (m *Record) XXX_Size() int { + return m.Size() +} +func (m *Record) XXX_DiscardUnknown() { + xxx_messageInfo_Record.DiscardUnknown(m) +} + +var xxx_messageInfo_Record proto.InternalMessageInfo + +func (m *Record) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *Record) GetLabels() []Labels { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Record) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +type Labels struct { + Fingerprint uint64 `protobuf:"varint,1,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + Labels []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,2,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"labels"` +} + +func (m *Labels) Reset() { *m = Labels{} } +func (*Labels) ProtoMessage() {} +func (*Labels) 
Descriptor() ([]byte, []int) { + return fileDescriptor_ae6364fc8077884f, []int{1} +} +func (m *Labels) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Labels.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Labels) XXX_Merge(src proto.Message) { + xxx_messageInfo_Labels.Merge(m, src) +} +func (m *Labels) XXX_Size() int { + return m.Size() +} +func (m *Labels) XXX_DiscardUnknown() { + xxx_messageInfo_Labels.DiscardUnknown(m) +} + +var xxx_messageInfo_Labels proto.InternalMessageInfo + +func (m *Labels) GetFingerprint() uint64 { + if m != nil { + return m.Fingerprint + } + return 0 +} + +type Sample struct { + Fingerprint uint64 `protobuf:"varint,1,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_ae6364fc8077884f, []int{2} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetFingerprint() uint64 { + if m != nil { + return m.Fingerprint + } + return 0 +} + +func (m *Sample) GetTimestamp() uint64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Series struct { + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Fingerprint uint64 `protobuf:"varint,2,opt,name=fingerprint,proto3" json:"fingerprint,omitempty"` + Labels []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,3,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"labels"` + Chunks []client.Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks"` +} + +func (m *Series) Reset() { *m = Series{} } +func (*Series) ProtoMessage() {} +func (*Series) Descriptor() ([]byte, []int) { + return fileDescriptor_ae6364fc8077884f, []int{3} +} +func (m *Series) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Series.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Series) XXX_Merge(src proto.Message) { + xxx_messageInfo_Series.Merge(m, src) +} +func (m *Series) XXX_Size() int { + return m.Size() +} +func (m *Series) XXX_DiscardUnknown() { + 
xxx_messageInfo_Series.DiscardUnknown(m) +} + +var xxx_messageInfo_Series proto.InternalMessageInfo + +func (m *Series) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *Series) GetFingerprint() uint64 { + if m != nil { + return m.Fingerprint + } + return 0 +} + +func (m *Series) GetChunks() []client.Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +func init() { + proto.RegisterType((*Record)(nil), "ingester.Record") + proto.RegisterType((*Labels)(nil), "ingester.Labels") + proto.RegisterType((*Sample)(nil), "ingester.Sample") + proto.RegisterType((*Series)(nil), "ingester.Series") +} + +func init() { proto.RegisterFile("wal.proto", fileDescriptor_ae6364fc8077884f) } + +var fileDescriptor_ae6364fc8077884f = []byte{ + // 415 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xcd, 0xca, 0xd3, 0x40, + 0x14, 0xcd, 0x34, 0x75, 0x6a, 0xa7, 0x08, 0x3a, 0x08, 0x86, 0x22, 0xd3, 0x90, 0x55, 0x41, 0x4c, + 0x44, 0xf7, 0xa2, 0x75, 0xa3, 0xe0, 0x42, 0xd2, 0x9d, 0x0b, 0x25, 0x3f, 0xd3, 0x74, 0x6c, 0x92, + 0x09, 0x33, 0x13, 0x75, 0x29, 0xf8, 0x02, 0xbe, 0x81, 0x5b, 0x1f, 0xa5, 0xcb, 0x2e, 0x8b, 0x8b, + 0x62, 0x53, 0x04, 0x97, 0x7d, 0x04, 0xc9, 0x24, 0xd1, 0x52, 0x50, 0x3e, 0xbe, 0xc5, 0xb7, 0xcb, + 0x39, 0xf7, 0xdc, 0x73, 0xcf, 0xdc, 0xcc, 0xa0, 0xe1, 0x87, 0x20, 0x75, 0x0b, 0xc1, 0x15, 0xc7, + 0xd7, 0x59, 0x9e, 0x50, 0xa9, 0xa8, 0x18, 0xdf, 0x4f, 0x98, 0x5a, 0x96, 0xa1, 0x1b, 0xf1, 0xcc, + 0x4b, 0x78, 0xc2, 0x3d, 0x2d, 0x08, 0xcb, 0x85, 0x46, 0x1a, 0xe8, 0xaf, 0xa6, 0x71, 0xfc, 0xe4, + 0x44, 0x1e, 0x71, 0xa1, 0xe8, 0xc7, 0x42, 0xf0, 0x77, 0x34, 0x52, 0x2d, 0xf2, 0x8a, 0x55, 0xe2, + 0x75, 0xe6, 0x5e, 0x94, 0x32, 0x9a, 0x77, 0xa5, 0xc6, 0xc1, 0xf9, 0x0c, 0x10, 0xf4, 0x69, 0xc4, + 0x45, 0x8c, 0xef, 0xa0, 0x41, 0x29, 0xa9, 0x78, 0xcb, 0x62, 0x0b, 0xd8, 0x60, 0x3a, 0xf4, 0x61, + 0x0d, 0x5f, 0xc4, 0xd8, 0x45, 0x30, 0x0d, 0x42, 0x9a, 0x4a, 0xab, 0x67, 0x9b, 0xd3, 0xd1, 0xc3, + 0x9b, 0x6e, 0x67, 0xe9, 0xbe, 0xd4, 0xfc, 0xac, 0xbf, 0xde, 0x4d, 0x0c, 0xbf, 0x55, 0xe1, 0x07, + 0x68, 0x20, 0x83, 0xac, 0x48, 0xa9, 0xb4, 0xcc, 0xf3, 0x86, 0xb9, 0x2e, 0xb4, 0x0d, 0x9d, 0xcc, + 0xf9, 0x0a, 0x10, 0x6c, 0xac, 0xb0, 0x8d, 0x46, 0x8b, 0x5a, 0x2d, 0x0a, 0xc1, 0x72, 0xa5, 0x93, + 0xf4, 0xfd, 0x53, 0x0a, 0xcb, 0xb3, 0x38, 0xb7, 0xdc, 0xf6, 0x44, 0xda, 0xe1, 0x55, 0xc0, 0xc4, + 0xec, 0x79, 0x6d, 0xff, 0x7d, 0x37, 0xb9, 0xcc, 0x7e, 0x1a, 0x9b, 0xa7, 0x71, 0x50, 0x28, 0x2a, + 0xba, 0x33, 0x39, 0x6f, 0x10, 0x6c, 0xa2, 0x5f, 0x20, 0xe0, 0x5d, 0x34, 0x54, 0x2c, 0xa3, 0x52, + 0x05, 0x59, 0x61, 0xf5, 0x74, 0xfd, 0x2f, 0x81, 0x6f, 0xa3, 0x6b, 0xef, 0x83, 0xb4, 0xa4, 0x96, + 0x69, 0x83, 0x29, 0xf0, 0x1b, 0xe0, 0xfc, 0x04, 0x08, 0xce, 0xa9, 0x60, 0x54, 0xfe, 0xfb, 0x3f, + 0x9c, 0x4d, 0xee, 0xfd, 0x6f, 0x35, 0xe6, 0x95, 0xad, 0x06, 0xdf, 0x43, 0x30, 0x5a, 0x96, 0xf9, + 0x4a, 0x5a, 0x7d, 0x3d, 0xf4, 0x46, 0x37, 0xf4, 0x59, 0xcd, 0x76, 0x77, 0xa3, 0x91, 0xcc, 0x1e, + 0x6f, 0xf6, 0xc4, 0xd8, 0xee, 0x89, 0x71, 0xdc, 0x13, 0xf0, 0xa9, 0x22, 0xe0, 0x5b, 0x45, 0xc0, + 0xba, 0x22, 0x60, 0x53, 0x11, 0xf0, 0xa3, 0x22, 0xe0, 0x57, 0x45, 0x8c, 0x63, 0x45, 0xc0, 0x97, + 0x03, 0x31, 0x36, 0x07, 0x62, 0x6c, 0x0f, 0xc4, 0x78, 0xfd, 0xe7, 0x81, 0x84, 0x50, 0x5f, 0xdb, + 0x47, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x83, 0xd9, 0xb8, 0x9a, 0x3e, 0x03, 0x00, 0x00, +} + +func (this *Record) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Record) + if !ok { + that2, ok := that.(Record) + if ok { + 
that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.UserId != that1.UserId { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(&that1.Labels[i]) { + return false + } + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { + return false + } + } + return true +} +func (this *Labels) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Labels) + if !ok { + that2, ok := that.(Labels) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Fingerprint != that1.Fingerprint { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + return true +} +func (this *Sample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sample) + if !ok { + that2, ok := that.(Sample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Fingerprint != that1.Fingerprint { + return false + } + if this.Timestamp != that1.Timestamp { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Series) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Series) + if !ok { + that2, ok := that.(Series) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.UserId != that1.UserId { + return false + } + if this.Fingerprint != that1.Fingerprint { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Chunks) != len(that1.Chunks) { + return false + } + for i := range this.Chunks { + if !this.Chunks[i].Equal(&that1.Chunks[i]) { + return false + } + } + return true +} +func (this *Record) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&ingester.Record{") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + if this.Labels != nil { + vs := make([]*Labels, len(this.Labels)) + for i := range vs { + vs[i] = &this.Labels[i] + } + s = append(s, "Labels: "+fmt.Sprintf("%#v", vs)+",\n") + } + if this.Samples != nil { + vs := make([]*Sample, len(this.Samples)) + for i := range vs { + vs[i] = &this.Samples[i] + } + s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Labels) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&ingester.Labels{") + s = append(s, "Fingerprint: "+fmt.Sprintf("%#v", this.Fingerprint)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Sample) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&ingester.Sample{") + s = append(s, "Fingerprint: "+fmt.Sprintf("%#v", 
this.Fingerprint)+",\n") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Series) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&ingester.Series{") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + s = append(s, "Fingerprint: "+fmt.Sprintf("%#v", this.Fingerprint)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Chunks != nil { + vs := make([]*client.Chunk, len(this.Chunks)) + for i := range vs { + vs[i] = &this.Chunks[i] + } + s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWal(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Record) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Record) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Labels) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Labels) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Fingerprint != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x19 + } + if m.Timestamp != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Fingerprint != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Series) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Series) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintWal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Fingerprint != 0 { + i = encodeVarintWal(dAtA, i, uint64(m.Fingerprint)) + i-- + dAtA[i] = 0x10 + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarintWal(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWal(dAtA []byte, offset int, v uint64) int { + offset -= sovWal(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Record) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovWal(uint64(l)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovWal(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovWal(uint64(l)) + } + } + return n +} + +func (m *Labels) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Fingerprint != 0 { + n += 1 + sovWal(uint64(m.Fingerprint)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovWal(uint64(l)) + } + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Fingerprint != 0 { + n += 1 + sovWal(uint64(m.Fingerprint)) + } + if m.Timestamp != 0 { + n += 1 + sovWal(uint64(m.Timestamp)) + } + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *Series) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovWal(uint64(l)) + } + if m.Fingerprint != 0 { + n += 1 + sovWal(uint64(m.Fingerprint)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovWal(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovWal(uint64(l)) + } + } + return n +} + +func sovWal(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWal(x uint64) (n int) { + return sovWal(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) +} +func (this *Record) String() string { + if this == nil { + return "nil" + } + repeatedStringForLabels := "[]Labels{" + for _, f := range this.Labels { + repeatedStringForLabels += strings.Replace(strings.Replace(f.String(), "Labels", "Labels", 1), `&`, ``, 1) + "," + } + repeatedStringForLabels += "}" + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," + } + repeatedStringForSamples += "}" + s := strings.Join([]string{`&Record{`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Labels:` + repeatedStringForLabels + `,`, + `Samples:` + repeatedStringForSamples + `,`, + `}`, + }, "") + return s +} +func (this *Labels) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Labels{`, + `Fingerprint:` + fmt.Sprintf("%v", this.Fingerprint) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} +func (this *Sample) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sample{`, + `Fingerprint:` + fmt.Sprintf("%v", this.Fingerprint) + `,`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Series) String() string { + if this == nil { + return "nil" + } + repeatedStringForChunks := "[]Chunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += fmt.Sprintf("%v", f) + "," + } + repeatedStringForChunks += "}" + s := strings.Join([]string{`&Series{`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Fingerprint:` + fmt.Sprintf("%v", this.Fingerprint) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Chunks:` + repeatedStringForChunks + `,`, + `}`, + }, "") + return s +} +func valueToStringWal(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Record) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Record: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Labels{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Labels) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Labels: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Labels: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fingerprint", wireType) + } + m.Fingerprint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Fingerprint |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fingerprint", wireType) + } + m.Fingerprint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Fingerprint |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Series) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Series: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Series: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fingerprint", wireType) + } + m.Fingerprint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Fingerprint |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, client.Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWal(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWal + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthWal + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWal(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthWal + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWal = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto new file mode 100644 index 000000000000..25b5361dde2a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package ingester; + +option go_package = "ingester"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto"; + +message Record { + string user_id = 1; + repeated Labels labels = 2 [(gogoproto.nullable) = false]; + repeated Sample samples = 3 [(gogoproto.nullable) = false]; +} + +message Labels { + uint64 fingerprint = 1; + repeated cortex.LabelPair labels = 2 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"]; +} + +message Sample { + uint64 fingerprint = 1; + uint64 timestamp = 2; + double value = 3; +} + +message Series { + string user_id = 1; + uint64 fingerprint = 2; + repeated cortex.LabelPair labels = 3 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"]; + repeated cortex.Chunk chunks = 4 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go new file mode 100644 index 000000000000..9a199b0a3d13 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/batch.go @@ -0,0 +1,93 @@ +package batch + +import ( + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage" + + "github.com/cortexproject/cortex/pkg/chunk" + promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" +) + +// iterator iterates over batches. +type iterator interface { + // Seek to the batch at (or after) time t. + Seek(t int64, size int) bool + + // Next moves to the next batch. + Next(size int) bool + + // AtTime returns the start time of the next batch. Must only be called after + // Seek or Next have returned true. + AtTime() int64 + + // Batch returns the current batch. Must only be called after Seek or Next + // have returned true. + Batch() promchunk.Batch + + Err() error +} + +// NewChunkMergeIterator returns a storage.SeriesIterator that merges chunks together. +func NewChunkMergeIterator(chunks []chunk.Chunk, _, _ model.Time) storage.SeriesIterator { + iter := newMergeIterator(chunks) + return newIteratorAdapter(iter) +} + +// iteratorAdapter turns a batchIterator into a storage.SeriesIterator. 
+// It fetches ever-increasing batchSizes (up to promchunk.BatchSize) on each
+// call to Next; on calls to Seek, it resets the batch size to 1.
+type iteratorAdapter struct {
+	batchSize  int
+	curr       promchunk.Batch
+	underlying iterator
+}
+
+func newIteratorAdapter(underlying iterator) storage.SeriesIterator {
+	return &iteratorAdapter{
+		batchSize:  1,
+		underlying: underlying,
+	}
+}
+
+// Seek implements storage.SeriesIterator.
+func (a *iteratorAdapter) Seek(t int64) bool {
+	// Optimisation: see if the seek is within the current batch.
+	if a.curr.Length > 0 && t >= a.curr.Timestamps[0] && t <= a.curr.Timestamps[a.curr.Length-1] {
+		a.curr.Index = 0
+		for a.curr.Index < a.curr.Length && t > a.curr.Timestamps[a.curr.Index] {
+			a.curr.Index++
+		}
+		return true
+	}
+
+	a.curr.Length = -1
+	a.batchSize = 1
+	if a.underlying.Seek(t, a.batchSize) {
+		a.curr = a.underlying.Batch()
+		return a.curr.Index < a.curr.Length
+	}
+	return false
+}
+
+// Next implements storage.SeriesIterator.
+func (a *iteratorAdapter) Next() bool {
+	a.curr.Index++
+	for a.curr.Index >= a.curr.Length && a.underlying.Next(a.batchSize) {
+		a.curr = a.underlying.Batch()
+		a.batchSize = a.batchSize * 2
+		if a.batchSize > promchunk.BatchSize {
+			a.batchSize = promchunk.BatchSize
+		}
+	}
+	return a.curr.Index < a.curr.Length
+}
+
+// At implements storage.SeriesIterator.
+func (a *iteratorAdapter) At() (int64, float64) {
+	return a.curr.Timestamps[a.curr.Index], a.curr.Values[a.curr.Index]
+}
+
+// Err implements storage.SeriesIterator.
+func (a *iteratorAdapter) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go
new file mode 100644
index 000000000000..a9fc17e5568f
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/chunk.go
@@ -0,0 +1,71 @@
+package batch
+
+import (
+	"github.com/prometheus/common/model"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
+)
+
+// chunkIterator implements batchIterator over a single chunk. It is designed
+// to be reused by calling reset() with a fresh chunk.
+type chunkIterator struct {
+	chunk chunk.Chunk
+	it    promchunk.Iterator
+	batch promchunk.Batch
+}
+
+func (i *chunkIterator) reset(chunk chunk.Chunk) {
+	i.chunk = chunk
+	i.it = chunk.Data.NewIterator(i.it)
+	i.batch.Length = 0
+	i.batch.Index = 0
+}
+
+// Seek advances the iterator forward to the value at or after
+// the given timestamp.
+func (i *chunkIterator) Seek(t int64, size int) bool {
+	// We assume seeks only care about a specific window; if this chunk doesn't
+	// contain samples in that window, we can shortcut.
+	if int64(i.chunk.Through) < t {
+		return false
+	}
+
+	// If the seek is to the middle of the current batch, and size fits, we can
+	// shortcut.
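+	// (Illustrative reading of the check below: rewind Index to 0, scan forward
+	// to the first timestamp >= t, and reuse the batch only if more than `size`
+	// samples remain from that position.)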
+	if i.batch.Length > 0 && t >= i.batch.Timestamps[0] && t <= i.batch.Timestamps[i.batch.Length-1] {
+		i.batch.Index = 0
+		for i.batch.Index < i.batch.Length && t > i.batch.Timestamps[i.batch.Index] {
+			i.batch.Index++
+		}
+		if i.batch.Index+size < i.batch.Length {
+			return true
+		}
+	}
+
+	if i.it.FindAtOrAfter(model.Time(t)) {
+		i.batch = i.it.Batch(size)
+		return i.batch.Length > 0
+	}
+	return false
+}
+
+func (i *chunkIterator) Next(size int) bool {
+	if i.it.Scan() {
+		i.batch = i.it.Batch(size)
+		return i.batch.Length > 0
+	}
+	return false
+}
+
+func (i *chunkIterator) AtTime() int64 {
+	return i.batch.Timestamps[0]
+}
+
+func (i *chunkIterator) Batch() promchunk.Batch {
+	return i.batch
+}
+
+func (i *chunkIterator) Err() error {
+	return i.it.Err()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go
new file mode 100644
index 000000000000..1def7f9935e1
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/merge.go
@@ -0,0 +1,189 @@
+package batch
+
+import (
+	"container/heap"
+	"sort"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
+)
+
+type mergeIterator struct {
+	its []*nonOverlappingIterator
+	h   iteratorHeap
+
+	// Store the current sorted batchStream.
+	batches batchStream
+
+	// Buffers to merge in.
+	batchesBuf   batchStream
+	nextBatchBuf [1]promchunk.Batch
+
+	currErr error
+}
+
+func newMergeIterator(cs []chunk.Chunk) *mergeIterator {
+	css := partitionChunks(cs)
+	its := make([]*nonOverlappingIterator, 0, len(css))
+	for _, cs := range css {
+		its = append(its, newNonOverlappingIterator(cs))
+	}
+
+	c := &mergeIterator{
+		its:        its,
+		h:          make(iteratorHeap, 0, len(its)),
+		batches:    make(batchStream, 0, len(its)*2*promchunk.BatchSize),
+		batchesBuf: make(batchStream, len(its)*2*promchunk.BatchSize),
+	}
+
+	for _, iter := range c.its {
+		if iter.Next(1) {
+			c.h = append(c.h, iter)
+			continue
+		}
+
+		if err := iter.Err(); err != nil {
+			c.currErr = err
+		}
+	}
+
+	heap.Init(&c.h)
+	return c
+}
+
+func (c *mergeIterator) Seek(t int64, size int) bool {
+
+	// Optimisation to see if the seek is within our currently cached batches.
+found:
+	for len(c.batches) > 0 {
+		batch := &c.batches[0]
+		if t >= batch.Timestamps[0] && t <= batch.Timestamps[batch.Length-1] {
+			batch.Index = 0
+			for batch.Index < batch.Length && t > batch.Timestamps[batch.Index] {
+				batch.Index++
+			}
+			break found
+		}
+		copy(c.batches, c.batches[1:])
+		c.batches = c.batches[:len(c.batches)-1]
+	}
+
+	// If we didn't find anything in the current set of batches, reset the heap
+	// and seek.
+	if len(c.batches) == 0 {
+		c.h = c.h[:0]
+		c.batches = c.batches[:0]
+
+		for _, iter := range c.its {
+			if iter.Seek(t, size) {
+				c.h = append(c.h, iter)
+				continue
+			}
+
+			if err := iter.Err(); err != nil {
+				c.currErr = err
+				return false
+			}
+		}
+
+		heap.Init(&c.h)
+	}
+
+	return c.buildNextBatch(size)
+}
+
+func (c *mergeIterator) Next(size int) bool {
+	// Pop the last built batch in a way that doesn't extend the slice.
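+	// (Shifting with copy keeps the same backing array, so the leading batch
+	// slot can be reused by later merges instead of being re-sliced away.)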
+	if len(c.batches) > 0 {
+		copy(c.batches, c.batches[1:])
+		c.batches = c.batches[:len(c.batches)-1]
+	}
+
+	return c.buildNextBatch(size)
+}
+
+func (c *mergeIterator) nextBatchEndTime() int64 {
+	batch := &c.batches[0]
+	return batch.Timestamps[batch.Length-1]
+}
+
+func (c *mergeIterator) buildNextBatch(size int) bool {
+	// All we need to do is get enough batches that our first batch's last entry
+	// is before all iterators' next entries.
+	for len(c.h) > 0 && (len(c.batches) == 0 || c.nextBatchEndTime() >= c.h[0].AtTime()) {
+		c.nextBatchBuf[0] = c.h[0].Batch()
+		c.batchesBuf = mergeStreams(c.batches, c.nextBatchBuf[:], c.batchesBuf, size)
+		copy(c.batches[:len(c.batchesBuf)], c.batchesBuf)
+		c.batches = c.batches[:len(c.batchesBuf)]
+
+		if c.h[0].Next(size) {
+			heap.Fix(&c.h, 0)
+		} else {
+			heap.Pop(&c.h)
+		}
+	}
+
+	return len(c.batches) > 0
+}
+
+func (c *mergeIterator) AtTime() int64 {
+	return c.batches[0].Timestamps[0]
+}
+
+func (c *mergeIterator) Batch() promchunk.Batch {
+	return c.batches[0]
+}
+
+func (c *mergeIterator) Err() error {
+	return c.currErr
+}
+
+type iteratorHeap []iterator
+
+func (h *iteratorHeap) Len() int      { return len(*h) }
+func (h *iteratorHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
+
+func (h *iteratorHeap) Less(i, j int) bool {
+	iT := (*h)[i].AtTime()
+	jT := (*h)[j].AtTime()
+	return iT < jT
+}
+
+func (h *iteratorHeap) Push(x interface{}) {
+	*h = append(*h, x.(iterator))
+}
+
+func (h *iteratorHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
+
+// Build a list of lists of non-overlapping chunks.
+func partitionChunks(cs []chunk.Chunk) [][]chunk.Chunk {
+	sort.Sort(byFrom(cs))
+
+	css := [][]chunk.Chunk{}
+outer:
+	for _, c := range cs {
+		for i, cs := range css {
+			if cs[len(cs)-1].Through.Before(c.From) {
+				css[i] = append(css[i], c)
+				continue outer
+			}
+		}
+		cs := make([]chunk.Chunk, 0, len(cs)/(len(css)+1))
+		cs = append(cs, c)
+		css = append(css, cs)
+	}
+
+	return css
+}
+
+type byFrom []chunk.Chunk
+
+func (b byFrom) Len() int           { return len(b) }
+func (b byFrom) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byFrom) Less(i, j int) bool { return b[i].From < b[j].From }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go
new file mode 100644
index 000000000000..67bf43dd69b2
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/non_overlapping.go
@@ -0,0 +1,69 @@
+package batch
+
+import (
+	"github.com/cortexproject/cortex/pkg/chunk"
+	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
+)
+
+type nonOverlappingIterator struct {
+	curr   int
+	chunks []chunk.Chunk
+	iter   chunkIterator
+}
+
+// newNonOverlappingIterator returns a single iterator over a slice of sorted,
+// non-overlapping chunks.
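+//
+// The expected invariant (established by partitionChunks in merge.go) is that
+// chunks[n].Through is before chunks[n+1].From for every n, so the iterator
+// can simply step from one chunk to the next.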
+func newNonOverlappingIterator(chunks []chunk.Chunk) *nonOverlappingIterator {
+	it := &nonOverlappingIterator{
+		chunks: chunks,
+	}
+	it.iter.reset(it.chunks[0])
+	return it
+}
+
+func (it *nonOverlappingIterator) Seek(t int64, size int) bool {
+	for {
+		if it.iter.Seek(t, size) {
+			return true
+		} else if it.iter.Err() != nil {
+			return false
+		} else if !it.next() {
+			return false
+		}
+	}
+}
+
+func (it *nonOverlappingIterator) Next(size int) bool {
+	for {
+		if it.iter.Next(size) {
+			return true
+		} else if it.iter.Err() != nil {
+			return false
+		} else if !it.next() {
+			return false
+		}
+	}
+}
+
+func (it *nonOverlappingIterator) next() bool {
+	it.curr++
+	if it.curr < len(it.chunks) {
+		it.iter.reset(it.chunks[it.curr])
+	}
+	return it.curr < len(it.chunks)
+}
+
+func (it *nonOverlappingIterator) AtTime() int64 {
+	return it.iter.AtTime()
+}
+
+func (it *nonOverlappingIterator) Batch() promchunk.Batch {
+	return it.iter.Batch()
+}
+
+func (it *nonOverlappingIterator) Err() error {
+	if it.curr < len(it.chunks) {
+		return it.iter.Err()
+	}
+	return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go
new file mode 100644
index 000000000000..66343b424c51
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/batch/stream.go
@@ -0,0 +1,110 @@
+package batch
+
+import (
+	promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding"
+)
+
+// batchStream deals with iterating through multiple, non-overlapping batches,
+// and building new slices of non-overlapping batches. Designed to be used
+// without allocations.
+type batchStream []promchunk.Batch
+
+// reset, hasNext, next, atTime etc. are all inlined in go1.11.
+
+func (bs *batchStream) reset() {
+	for i := range *bs {
+		(*bs)[i].Index = 0
+	}
+}
+
+func (bs *batchStream) hasNext() bool {
+	return len(*bs) > 0
+}
+
+func (bs *batchStream) next() {
+	(*bs)[0].Index++
+	if (*bs)[0].Index >= (*bs)[0].Length {
+		*bs = (*bs)[1:]
+	}
+}
+
+func (bs *batchStream) atTime() int64 {
+	return (*bs)[0].Timestamps[(*bs)[0].Index]
+}
+
+func (bs *batchStream) at() (int64, float64) {
+	b := &(*bs)[0]
+	return b.Timestamps[b.Index], b.Values[b.Index]
+}
+
+func mergeStreams(left, right batchStream, result batchStream, size int) batchStream {
+	// Reset the Index and Length of existing batches.
+	for i := range result {
+		result[i].Index = 0
+		result[i].Length = 0
+	}
+	resultLen := 1 // Number of batches in the final result.
+	b := &result[0]
+
+	// This function adds a new batch to the result
+	// if the current batch being appended is full.
+	checkForFullBatch := func() {
+		if b.Index == size {
+			// The batch reached its intended size.
+			// Add another batch to the result
+			// and use it for further appending.
+
+			// Index is the position at which the next sample
+			// would be appended, hence it equals the final length.
+			b.Length = b.Index
+			resultLen++
+			if resultLen > len(result) {
+				// It is possible that the result can grow longer
+				// than the one provided.
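+				// (This append allocates once; mergeIterator reuses the
+				// grown buffer on subsequent calls, keeping the steady
+				// state allocation-free.)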
+				result = append(result, promchunk.Batch{})
+			}
+			b = &result[resultLen-1]
+		}
+	}
+
+	for left.hasNext() && right.hasNext() {
+		checkForFullBatch()
+		t1, t2 := left.atTime(), right.atTime()
+		if t1 < t2 {
+			b.Timestamps[b.Index], b.Values[b.Index] = left.at()
+			left.next()
+		} else if t1 > t2 {
+			b.Timestamps[b.Index], b.Values[b.Index] = right.at()
+			right.next()
+		} else {
+			b.Timestamps[b.Index], b.Values[b.Index] = left.at()
+			left.next()
+			right.next()
+		}
+		b.Index++
+	}
+
+	// This function adds all the samples from the provided
+	// batchStream into the result in the same order.
+	addToResult := func(bs batchStream) {
+		for ; bs.hasNext(); bs.next() {
+			checkForFullBatch()
+			b.Timestamps[b.Index], b.Values[b.Index] = bs.at()
+			b.Index++
+			b.Length++
+		}
+	}
+
+	addToResult(left)
+	addToResult(right)
+
+	// Index is the position at which the next sample
+	// would be appended, hence it equals the final length.
+	b.Length = b.Index
+
+	// The provided 'result' slice might be bigger
+	// than the actual result, hence return the subslice.
+	result = result[:resultLen]
+	result.reset()
+	return result
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go
new file mode 100644
index 000000000000..169b0823458b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go
@@ -0,0 +1,341 @@
+package querier
+
+import (
+	"context"
+	"math"
+	"sort"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/store/storepb"
+	"github.com/weaveworks/common/logging"
+	"github.com/weaveworks/common/user"
+
+	"github.com/cortexproject/cortex/pkg/querier/series"
+	"github.com/cortexproject/cortex/pkg/storage/tsdb"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/cortexproject/cortex/pkg/util/spanlogger"
+)
+
+// BlockQueryable is a storage.Queryable implementation for blocks storage.
+type BlockQueryable struct {
+	services.Service
+
+	us *BucketStoresService
+}
+
+// NewBlockQueryable returns a client to query a block store.
+func NewBlockQueryable(cfg tsdb.Config, logLevel logging.Level, registerer prometheus.Registerer) (*BlockQueryable, error) {
+	util.WarnExperimentalUse("Blocks storage engine")
+	bucketClient, err := tsdb.NewBucketClient(context.Background(), cfg, "cortex-bucket-stores", util.Logger)
+	if err != nil {
+		return nil, err
+	}
+
+	if registerer != nil {
+		bucketClient = objstore.BucketWithMetrics( /* bucket label value */ "", bucketClient, prometheus.WrapRegistererWithPrefix("cortex_querier_", registerer))
+	}
+
+	us, err := NewBucketStoresService(cfg, bucketClient, logLevel, util.Logger, registerer)
+	if err != nil {
+		return nil, err
+	}
+
+	b := &BlockQueryable{us: us}
+	b.Service = services.NewIdleService(b.starting, b.stopping)
+
+	return b, nil
+}
+
+func (b *BlockQueryable) starting(ctx context.Context) error {
+	return errors.Wrap(services.StartAndAwaitRunning(ctx, b.us), "failed to start BucketStoresService")
+}
+
+func (b *BlockQueryable) stopping(_ error) error {
+	return errors.Wrap(services.StopAndAwaitTerminated(context.Background(), b.us), "stopping BucketStoresService")
+}
+
+// Querier returns a new Querier on the storage.
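+// It returns a promql.ErrStorage if the service is not yet running or if the
+// tenant (org ID) cannot be extracted from ctx. A hedged caller sketch, using
+// the weaveworks/common user package to inject a hypothetical tenant ID:
+//
+//	ctx := user.InjectOrgID(context.Background(), "tenant-1")
+//	q, err := queryable.Querier(ctx, mint, maxt)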
+func (b *BlockQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + if s := b.State(); s != services.Running { + return nil, promql.ErrStorage{Err: errors.Errorf("BlockQueryable is not running: %v", s)} + } + + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, promql.ErrStorage{Err: err} + } + + return &blocksQuerier{ + ctx: ctx, + mint: mint, + maxt: maxt, + userID: userID, + userStores: b.us, + }, nil +} + +type blocksQuerier struct { + ctx context.Context + mint, maxt int64 + userID string + userStores *BucketStoresService +} + +func (b *blocksQuerier) Select(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + return b.SelectSorted(sp, matchers...) +} + +func (b *blocksQuerier) SelectSorted(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + log, ctx := spanlogger.New(b.ctx, "blocksQuerier.Select") + defer log.Span.Finish() + + mint, maxt := b.mint, b.maxt + if sp != nil { + mint, maxt = sp.Start, sp.End + } + converted := convertMatchersToLabelMatcher(matchers) + + // Returned series are sorted. + // No processing of responses is done here. Dealing with multiple responses + // for the same series and overlapping chunks is done in blockQuerierSeriesSet. + series, warnings, err := b.userStores.Series(ctx, b.userID, &storepb.SeriesRequest{ + MinTime: mint, + MaxTime: maxt, + Matchers: converted, + PartialResponseStrategy: storepb.PartialResponseStrategy_ABORT, + }) + if err != nil { + return nil, nil, promql.ErrStorage{Err: err} + } + + return &blockQuerierSeriesSet{ + series: series, + }, warnings, nil +} + +func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMatcher { + var converted []storepb.LabelMatcher + for _, m := range matchers { + var t storepb.LabelMatcher_Type + switch m.Type { + case labels.MatchEqual: + t = storepb.LabelMatcher_EQ + case labels.MatchNotEqual: + t = storepb.LabelMatcher_NEQ + case labels.MatchRegexp: + t = storepb.LabelMatcher_RE + case labels.MatchNotRegexp: + t = storepb.LabelMatcher_NRE + } + + converted = append(converted, storepb.LabelMatcher{ + Type: t, + Name: m.Name, + Value: m.Value, + }) + } + return converted +} + +func (b *blocksQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + // Cortex doesn't use this. It will ask ingesters for metadata. + return nil, nil, errors.New("not implemented") +} + +func (b *blocksQuerier) LabelNames() ([]string, storage.Warnings, error) { + // Cortex doesn't use this. It will ask ingesters for metadata. + return nil, nil, errors.New("not implemented") +} + +func (b *blocksQuerier) Close() error { + // nothing to do here. + return nil +} + +// Implementation of storage.SeriesSet, based on individual responses from store client. +type blockQuerierSeriesSet struct { + series []*storepb.Series + + // next response to process + next int + + currLabels []storepb.Label + currChunks []storepb.AggrChunk +} + +func (bqss *blockQuerierSeriesSet) Next() bool { + bqss.currChunks = nil + bqss.currLabels = nil + + if bqss.next >= len(bqss.series) { + return false + } + + bqss.currLabels = bqss.series[bqss.next].Labels + bqss.currChunks = bqss.series[bqss.next].Chunks + + bqss.next++ + + // Merge chunks for current series. Chunks may come in multiple responses, but as soon + // as the response has chunks for a new series, we can stop searching. Series are sorted. + // See documentation for StoreClient.Series call for details. 
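+	// (CompareLabels == 0 below means the next response carries exactly the
+	// same label set, so its chunks still belong to the series being
+	// assembled.)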
+ for bqss.next < len(bqss.series) && storepb.CompareLabels(bqss.currLabels, bqss.series[bqss.next].Labels) == 0 { + bqss.currChunks = append(bqss.currChunks, bqss.series[bqss.next].Chunks...) + bqss.next++ + } + + return true +} + +func (bqss *blockQuerierSeriesSet) At() storage.Series { + if bqss.currLabels == nil { + return nil + } + + return newBlockQuerierSeries(bqss.currLabels, bqss.currChunks) +} + +func (bqss *blockQuerierSeriesSet) Err() error { + return nil +} + +func newBlockQuerierSeries(lbls []storepb.Label, chunks []storepb.AggrChunk) *blockQuerierSeries { + sort.Slice(chunks, func(i, j int) bool { + return chunks[i].MinTime < chunks[j].MinTime + }) + + b := labels.NewBuilder(nil) + for _, l := range lbls { + // Ignore external label set by the shipper + if l.Name != tsdb.TenantIDExternalLabel { + b.Set(l.Name, l.Value) + } + } + + return &blockQuerierSeries{labels: b.Labels(), chunks: chunks} +} + +type blockQuerierSeries struct { + labels labels.Labels + chunks []storepb.AggrChunk +} + +func (bqs *blockQuerierSeries) Labels() labels.Labels { + return bqs.labels +} + +func (bqs *blockQuerierSeries) Iterator() storage.SeriesIterator { + if len(bqs.chunks) == 0 { + // should not happen in practice, but we have a unit test for it + return series.NewErrIterator(errors.New("no chunks")) + } + + its := make([]chunkenc.Iterator, 0, len(bqs.chunks)) + + for _, c := range bqs.chunks { + ch, err := chunkenc.FromData(chunkenc.EncXOR, c.Raw.Data) + if err != nil { + return series.NewErrIterator(errors.Wrapf(err, "failed to initialize chunk from XOR encoded raw data (series: %v min time: %d max time: %d)", bqs.Labels(), c.MinTime, c.MaxTime)) + } + + it := ch.Iterator(nil) + its = append(its, it) + } + + return newBlockQuerierSeriesIterator(bqs.Labels(), its) +} + +func newBlockQuerierSeriesIterator(labels labels.Labels, its []chunkenc.Iterator) *blockQuerierSeriesIterator { + return &blockQuerierSeriesIterator{labels: labels, iterators: its, lastT: math.MinInt64} +} + +// blockQuerierSeriesIterator implements a series iterator on top +// of a list of time-sorted, non-overlapping chunks. +type blockQuerierSeriesIterator struct { + // only used for error reporting + labels labels.Labels + + iterators []chunkenc.Iterator + i int + lastT int64 +} + +func (it *blockQuerierSeriesIterator) Seek(t int64) bool { + // We generally expect the chunks already to be cut down + // to the range we are interested in. There's not much to be gained from + // hopping across chunks so we just call next until we reach t. + for { + ct, _ := it.At() + if ct >= t { + return true + } + if !it.Next() { + return false + } + } +} + +func (it *blockQuerierSeriesIterator) At() (int64, float64) { + if it.i >= len(it.iterators) { + return 0, 0 + } + + t, v := it.iterators[it.i].At() + it.lastT = t + return t, v +} + +func (it *blockQuerierSeriesIterator) Next() bool { + if it.i >= len(it.iterators) { + return false + } + + if it.iterators[it.i].Next() { + return true + } + if it.iterators[it.i].Err() != nil { + return false + } + + for { + it.i++ + + if it.i >= len(it.iterators) { + return false + } + + // we must advance iterator first, to see if it has any samples. + // Seek will call At() as its first operation. + if !it.iterators[it.i].Next() { + if it.iterators[it.i].Err() != nil { + return false + } + + // Found empty iterator without error, skip it. + continue + } + + // Chunks are guaranteed to be ordered but not generally guaranteed to not overlap. 
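+		// (For instance, after compacting overlapping blocks, the same sample
+		// can appear in two adjacent chunks.)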
+		// We must make sure to skip any overlapping range between adjacent chunks.
+		// For example, if the previous chunk ended at t=100 and the next chunk also
+		// contains samples up to t=100, seeking to lastT+1 skips the duplicates.
+		return it.Seek(it.lastT + 1)
+	}
+}
+
+func (it *blockQuerierSeriesIterator) Err() error {
+	if it.i >= len(it.iterators) {
+		return nil
+	}
+
+	err := it.iterators[it.i].Err()
+	if err != nil {
+		return promql.ErrStorage{Err: errors.Wrapf(err, "cannot iterate chunk for series: %v", it.labels)}
+	}
+	return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_bucket_stores_service.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_bucket_stores_service.go
new file mode 100644
index 000000000000..da04c54c85b6
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_bucket_stores_service.go
@@ -0,0 +1,121 @@
+package querier
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/runutil"
+	"github.com/thanos-io/thanos/pkg/store/storepb"
+	"github.com/weaveworks/common/logging"
+	"google.golang.org/grpc/metadata"
+
+	"github.com/cortexproject/cortex/pkg/storage/tsdb"
+	"github.com/cortexproject/cortex/pkg/storegateway"
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+// BucketStoresService wraps BucketStores into a service which triggers both the initial
+// sync at startup and a periodic sync honoring the configured sync interval.
+type BucketStoresService struct {
+	services.Service
+
+	cfg    tsdb.Config
+	logger log.Logger
+	stores *storegateway.BucketStores
+}
+
+func NewBucketStoresService(cfg tsdb.Config, bucketClient objstore.Bucket, logLevel logging.Level, logger log.Logger, registerer prometheus.Registerer) (*BucketStoresService, error) {
+	var storesReg prometheus.Registerer
+	if registerer != nil {
+		storesReg = prometheus.WrapRegistererWithPrefix("cortex_querier_", registerer)
+	}
+
+	stores, err := storegateway.NewBucketStores(cfg, nil, bucketClient, logLevel, logger, storesReg)
+	if err != nil {
+		return nil, err
+	}
+
+	s := &BucketStoresService{
+		cfg:    cfg,
+		stores: stores,
+		logger: logger,
+	}
+
+	s.Service = services.NewBasicService(s.starting, s.syncStoresLoop, nil)
+
+	return s, nil
+}
+
+func (s *BucketStoresService) starting(ctx context.Context) error {
+	if s.cfg.BucketStore.SyncInterval > 0 {
+		// Run an initial blocks sync, required in order to be able to serve queries.
+		if err := s.stores.InitialSync(ctx); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// syncStoresLoop periodically calls SyncBlocks() to synchronize the blocks for all tenants.
+func (s *BucketStoresService) syncStoresLoop(ctx context.Context) error {
+	// If the sync is disabled we never sync blocks, which means the bucket store
+	// will be empty and no series will be returned once queried.
+	if s.cfg.BucketStore.SyncInterval <= 0 {
+		<-ctx.Done()
+		return nil
+	}
+
+	syncInterval := s.cfg.BucketStore.SyncInterval
+
+	// Since we've just run the initial sync, we should wait the next
+	// sync interval before resyncing.
+	select {
+	case <-ctx.Done():
+		return nil
+	case <-time.After(syncInterval):
+	}
+
+	err := runutil.Repeat(syncInterval, ctx.Done(), func() error {
+		level.Info(s.logger).Log("msg", "synchronizing TSDB blocks for all users")
+		if err := s.stores.SyncBlocks(ctx); err != nil && err != io.EOF {
+			level.Warn(s.logger).Log("msg", "failed to synchronize TSDB blocks", "err", err)
+		} else {
+			level.Info(s.logger).Log("msg", "successfully synchronized TSDB blocks for all users")
+		}
+
+		return nil
+	})
+
+	// This should never occur because runutil.Repeat() returns an error
+	// only if the callback function returns an error (which ours never does), but
+	// since we have to handle the error to satisfy the linter, it's better to log it.
+	return errors.Wrap(err, "blocks synchronization has been halted due to an unexpected error")
+}
+
+// Series makes a series request to the underlying user bucket store.
+func (s *BucketStoresService) Series(ctx context.Context, userID string, req *storepb.SeriesRequest) ([]*storepb.Series, storage.Warnings, error) {
+	// Inject the user ID into the context metadata, as expected by BucketStores.
+	ctx = setUserIDToGRPCContext(ctx, userID)
+
+	srv := storegateway.NewBucketStoreSeriesServer(ctx)
+	err := s.stores.Series(req, srv)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return srv.SeriesSet, srv.Warnings, nil
+}
+
+func setUserIDToGRPCContext(ctx context.Context, userID string) context.Context {
+	// We have to store it in the incoming metadata because we have to emulate the
+	// case it's coming from a gRPC request, while here we're running everything in-memory.
+	return metadata.NewIncomingContext(ctx, metadata.Pairs(tsdb.TenantIDExternalLabel, userID))
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go
new file mode 100644
index 000000000000..58e9889ca6fa
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go
@@ -0,0 +1,321 @@
+package querier
+
+import (
+	"context"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/oklog/ulid"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+	"github.com/thanos-io/thanos/pkg/block"
+	"github.com/thanos-io/thanos/pkg/block/metadata"
+	"github.com/thanos-io/thanos/pkg/objstore"
+
+	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
+	"github.com/cortexproject/cortex/pkg/storegateway"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/services"
+)
+
+var (
+	errBlocksScannerNotRunning = errors.New("blocks scanner is not running")
+	errInvalidBlocksRange      = errors.New("invalid blocks time range")
+)
+
+type BlocksScannerConfig struct {
+	ScanInterval             time.Duration
+	TenantsConcurrency       int
+	MetasConcurrency         int
+	CacheDir                 string
+	ConsistencyDelay         time.Duration
+	IgnoreDeletionMarksDelay time.Duration
+}
+
+type BlocksScanner struct {
+	services.Service
+
+	cfg             BlocksScannerConfig
+	logger          log.Logger
+	bucketClient    objstore.Bucket
+	fetchersMetrics *storegateway.MetadataFetcherMetrics
+
+	// We reuse the metadata fetcher instance for a given tenant both for performance
+	// reasons (the fetcher keeps an in-memory cache) and to be able to collect and group metrics.
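+	// Fetchers are created lazily on the first scan of each tenant (see
+	// getOrCreateMetaFetcher) and the map is only accessed while holding fetchersMx.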
+	fetchersMx sync.Mutex
+	fetchers   map[string]block.MetadataFetcher
+
+	// Keep the per-tenant metas found during the last run.
+	metasMx sync.RWMutex
+	metas   map[string][]*metadata.Meta
+
+	scanDuration prometheus.Histogram
+}
+
+func NewBlocksScanner(cfg BlocksScannerConfig, bucketClient objstore.Bucket, logger log.Logger, reg prometheus.Registerer) *BlocksScanner {
+	d := &BlocksScanner{
+		cfg:             cfg,
+		logger:          logger,
+		bucketClient:    bucketClient,
+		fetchers:        make(map[string]block.MetadataFetcher),
+		metas:           make(map[string][]*metadata.Meta),
+		fetchersMetrics: storegateway.NewMetadataFetcherMetrics(),
+		scanDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
+			Name:    "cortex_querier_blocks_scan_duration_seconds",
+			Help:    "The total time it takes to run a full blocks scan across the storage.",
+			Buckets: []float64{1, 10, 20, 30, 60, 120, 180, 240, 300, 600},
+		}),
+	}
+
+	if reg != nil {
+		prometheus.WrapRegistererWithPrefix("cortex_querier_", reg).MustRegister(d.fetchersMetrics)
+	}
+
+	d.Service = services.NewTimerService(cfg.ScanInterval, d.starting, d.scan, nil)
+
+	return d
+}
+
+// GetBlocks returns known blocks for userID containing samples within the range minT
+// and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending.
+func (d *BlocksScanner) GetBlocks(userID string, minT, maxT int64) ([]*metadata.Meta, error) {
+	// We need to ensure the initial full bucket scan succeeded.
+	if d.State() != services.Running {
+		return nil, errBlocksScannerNotRunning
+	}
+	if maxT < minT {
+		return nil, errInvalidBlocksRange
+	}
+
+	d.metasMx.RLock()
+	defer d.metasMx.RUnlock()
+
+	userMetas, ok := d.metas[userID]
+	if !ok {
+		return nil, nil
+	}
+
+	// Since we expect the large majority of queries to have a time range close
+	// to "now", we find matching blocks by iterating the list in reverse order.
+	var matchingMetas []*metadata.Meta
+	for i := len(userMetas) - 1; i >= 0; i-- {
+		// NOTE: Block intervals are half-open: [MinTime, MaxTime).
+		if userMetas[i].MinTime <= maxT && minT < userMetas[i].MaxTime {
+			matchingMetas = append(matchingMetas, userMetas[i])
+		}
+
+		// We can safely break the loop because metas are sorted by MaxTime.
+		if userMetas[i].MaxTime <= minT {
+			break
+		}
+	}
+
+	return matchingMetas, nil
+}
+
+func (d *BlocksScanner) starting(ctx context.Context) error {
+	// Before the service is in the running state it must have successfully
+	// completed the initial scan.
+	return d.scanBucket(ctx)
+}
+
+func (d *BlocksScanner) scan(ctx context.Context) error {
+	if err := d.scanBucket(ctx); err != nil {
+		level.Error(d.logger).Log("msg", "failed to scan bucket storage to find blocks", "err", err)
+	}
+
+	// Never return an error, otherwise the service terminates.
+	return nil
+}
+
+func (d *BlocksScanner) scanBucket(ctx context.Context) error {
+	defer func(start time.Time) {
+		d.scanDuration.Observe(time.Since(start).Seconds())
+	}(time.Now())
+
+	jobsChan := make(chan string)
+	resMx := sync.Mutex{}
+	resMetas := map[string][]*metadata.Meta{}
+	resErrs := tsdb_errors.MultiError{}
+
+	// Create a pool of workers which will synchronize metas. The pool size
+	// is limited in order to avoid concurrently syncing a lot of tenants in
+	// a large cluster.
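+	// Workers consume tenant IDs from jobsChan until it is closed (after the bucket
+	// iteration below has finished) and collect results and errors under resMx.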
+	wg := &sync.WaitGroup{}
+	wg.Add(d.cfg.TenantsConcurrency)
+
+	for i := 0; i < d.cfg.TenantsConcurrency; i++ {
+		go func() {
+			defer wg.Done()
+
+			for userID := range jobsChan {
+				metas, err := d.scanUserBlocksWithRetries(ctx, userID)
+
+				resMx.Lock()
+				if err != nil {
+					resErrs.Add(err)
+				} else {
+					resMetas[userID] = metas
+				}
+				resMx.Unlock()
+			}
+		}()
+	}
+
+	// Iterate the bucket to discover users.
+	err := d.bucketClient.Iter(ctx, "", func(s string) error {
+		userID := strings.TrimSuffix(s, "/")
+		select {
+		case jobsChan <- userID:
+			return nil
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	})
+
+	if err != nil {
+		resMx.Lock()
+		resErrs.Add(err)
+		resMx.Unlock()
+	}
+
+	// Wait until all workers completed.
+	close(jobsChan)
+	wg.Wait()
+
+	d.metasMx.Lock()
+	if len(resErrs) == 0 {
+		// Replace the map, so that we discard tenants fully deleted from storage.
+		d.metas = resMetas
+	} else {
+		// If an error occurred, we prefer to partially update the metas map instead of
+		// not updating it at all. At least we'll update blocks for the successful tenants.
+		for userID, metas := range resMetas {
+			d.metas[userID] = metas
+		}
+	}
+	d.metasMx.Unlock()
+
+	return resErrs.Err()
+}
+
+// scanUserBlocksWithRetries runs scanUserBlocks() retrying multiple times
+// in case of error.
+func (d *BlocksScanner) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas []*metadata.Meta, err error) {
+	retries := util.NewBackoff(ctx, util.BackoffConfig{
+		MinBackoff: time.Second,
+		MaxBackoff: 30 * time.Second,
+		MaxRetries: 3,
+	})
+
+	for retries.Ongoing() {
+		metas, err = d.scanUserBlocks(ctx, userID)
+		if err == nil {
+			return
+		}
+
+		retries.Wait()
+	}
+
+	return
+}
+
+func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) ([]*metadata.Meta, error) {
+	fetcher, err := d.getOrCreateMetaFetcher(userID)
+	if err != nil {
+		return nil, errors.Wrapf(err, "create meta fetcher for user %s", userID)
+	}
+
+	metas, partials, err := fetcher.Fetch(ctx)
+	if err != nil {
+		return nil, errors.Wrapf(err, "scan blocks for user %s", userID)
+	}
+
+	// In case we've found any partial block we log it but continue, because we don't want
+	// to break the scanner just because there's a spurious block.
+	if len(partials) > 0 {
+		logPartialBlocks(userID, partials, d.logger)
+	}
+
+	return sortMetasByMaxTime(metas), nil
+}
+
+func (d *BlocksScanner) getOrCreateMetaFetcher(userID string) (block.MetadataFetcher, error) {
+	d.fetchersMx.Lock()
+	defer d.fetchersMx.Unlock()
+
+	if fetcher, ok := d.fetchers[userID]; ok {
+		return fetcher, nil
+	}
+
+	fetcher, err := d.createMetaFetcher(userID)
+	if err != nil {
+		return nil, err
+	}
+
+	d.fetchers[userID] = fetcher
+	return fetcher, nil
+}
+
+func (d *BlocksScanner) createMetaFetcher(userID string) (block.MetadataFetcher, error) {
+	userLogger := util.WithUserID(userID, d.logger)
+	userBucket := cortex_tsdb.NewUserBucketClient(userID, d.bucketClient)
+	userReg := prometheus.NewRegistry()
+
+	var filters []block.MetadataFilter
+	// TODO(pracucci) I'm dubious we actually need NewConsistencyDelayMetaFilter here. I think we should remove it and move the
+	// consistency delay upwards, where we do the consistency check in the querier.
+	filters = append(filters, block.NewConsistencyDelayMetaFilter(userLogger, d.cfg.ConsistencyDelay, userReg))
+	filters = append(filters, block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, d.cfg.IgnoreDeletionMarksDelay))
+	// TODO(pracucci) is this problematic due to the consistency check?
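+	// The deduplicate filter is expected to drop blocks whose data is already fully
+	// contained in compacted blocks, so each time range should be returned only once.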
+ filters = append(filters, block.NewDeduplicateFilter()) + + f, err := block.NewMetaFetcher( + userLogger, + d.cfg.MetasConcurrency, + userBucket, + // The fetcher stores cached metas in the "meta-syncer/" sub directory. + filepath.Join(d.cfg.CacheDir, userID), + userReg, + filters, + nil, + ) + if err != nil { + return nil, err + } + + d.fetchersMetrics.AddUserRegistry(userID, userReg) + return f, nil +} + +func sortMetasByMaxTime(metas map[ulid.ULID]*metadata.Meta) []*metadata.Meta { + sorted := make([]*metadata.Meta, 0, len(metas)) + for _, m := range metas { + sorted = append(sorted, m) + } + + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].MaxTime < sorted[j].MaxTime + }) + + return sorted +} + +func logPartialBlocks(userID string, partials map[ulid.ULID]error, logger log.Logger) { + ids := make([]string, 0, len(partials)) + errs := make([]string, 0, len(partials)) + + for id, err := range partials { + ids = append(ids, id.String()) + errs = append(errs, err.Error()) + } + + level.Warn(logger).Log("msg", "found partial blocks", "user", userID, "blocks", strings.Join(ids, ","), "err", strings.Join(errs, ",")) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go new file mode 100644 index 000000000000..c60619cffcd8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go @@ -0,0 +1,110 @@ +package querier + +import ( + "context" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/querier/chunkstore" + seriesset "github.com/cortexproject/cortex/pkg/querier/series" +) + +type chunkIteratorFunc func(chunks []chunk.Chunk, from, through model.Time) storage.SeriesIterator + +func newChunkStoreQueryable(store chunkstore.ChunkStore, chunkIteratorFunc chunkIteratorFunc) storage.Queryable { + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return &chunkStoreQuerier{ + store: store, + chunkIteratorFunc: chunkIteratorFunc, + ctx: ctx, + mint: mint, + maxt: maxt, + }, nil + }) +} + +type chunkStoreQuerier struct { + store chunkstore.ChunkStore + chunkIteratorFunc chunkIteratorFunc + ctx context.Context + mint, maxt int64 +} + +func (q *chunkStoreQuerier) SelectSorted(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + userID, err := user.ExtractOrgID(q.ctx) + if err != nil { + return nil, nil, err + } + chunks, err := q.store.Get(q.ctx, userID, model.Time(sp.Start), model.Time(sp.End), matchers...) + if err != nil { + return nil, nil, promql.ErrStorage{Err: err} + } + + return partitionChunks(chunks, q.mint, q.maxt, q.chunkIteratorFunc), nil, nil +} + +func (q *chunkStoreQuerier) Select(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + return q.SelectSorted(sp, matchers...) +} + +// Series in the returned set are sorted alphabetically by labels. 
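+// The sorting comes from seriesset.NewConcreteSeriesSet below; it is what allows
+// mergeSeriesSets (querier.go) to feed the result into storage.NewMergeSeriesSet.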
+func partitionChunks(chunks []chunk.Chunk, mint, maxt int64, iteratorFunc chunkIteratorFunc) storage.SeriesSet { + chunksBySeries := map[model.Fingerprint][]chunk.Chunk{} + for _, c := range chunks { + fp := client.Fingerprint(c.Metric) + chunksBySeries[fp] = append(chunksBySeries[fp], c) + } + + series := make([]storage.Series, 0, len(chunksBySeries)) + for i := range chunksBySeries { + series = append(series, &chunkSeries{ + labels: chunksBySeries[i][0].Metric, + chunks: chunksBySeries[i], + chunkIteratorFunc: iteratorFunc, + mint: mint, + maxt: maxt, + }) + } + + return seriesset.NewConcreteSeriesSet(series) +} + +func (q *chunkStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + return nil, nil, nil +} + +func (q *chunkStoreQuerier) LabelNames() ([]string, storage.Warnings, error) { + return nil, nil, nil +} + +func (q *chunkStoreQuerier) Close() error { + return nil +} + +// Implements SeriesWithChunks +type chunkSeries struct { + labels labels.Labels + chunks []chunk.Chunk + chunkIteratorFunc chunkIteratorFunc + mint, maxt int64 +} + +func (s *chunkSeries) Labels() labels.Labels { + return s.labels +} + +// Iterator returns a new iterator of the data of the series. +func (s *chunkSeries) Iterator() storage.SeriesIterator { + return s.chunkIteratorFunc(s.chunks, model.Time(s.mint), model.Time(s.maxt)) +} + +// Chunks implements SeriesWithChunks interface. +func (s *chunkSeries) Chunks() []chunk.Chunk { + return s.chunks +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go new file mode 100644 index 000000000000..1cff7f676d9a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/chunks_handler.go @@ -0,0 +1,93 @@ +package querier + +import ( + "archive/tar" + "compress/gzip" + "net/http" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/querier/chunkstore" + "github.com/cortexproject/cortex/pkg/util" +) + +// ChunksHandler allows you to fetch a compressed tar of all the chunks for a +// given time range and set of matchers. +// Only works with the new unified chunk querier, which is enabled when you turn +// on ingester chunk query streaming. +func ChunksHandler(queryable storage.Queryable) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userID, err := user.ExtractOrgID(r.Context()) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + mint, err := util.ParseTime(r.FormValue("start")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + maxt, err := util.ParseTime(r.FormValue("end")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + matchers, err := promql.ParseMetricSelector(r.FormValue("matcher")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + querier, err := queryable.Querier(r.Context(), mint, maxt) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + store, ok := querier.(chunkstore.ChunkStore) + if !ok { + http.Error(w, "not supported", http.StatusServiceUnavailable) + return + } + + chunks, err := store.Get(r.Context(), userID, model.Time(mint), model.Time(maxt), matchers...) 
+ if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Add("Content-Type", "application/tar+gzip") + gw := gzip.NewWriter(w) + defer gw.Close() + + writer := tar.NewWriter(gw) + defer writer.Close() + + for _, chunk := range chunks { + buf, err := chunk.Encoded() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := writer.WriteHeader(&tar.Header{ + Name: chunk.ExternalKey(), + Size: int64(len(buf)), + Mode: 0600, + }); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if _, err := writer.Write(buf); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + }) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go new file mode 100644 index 000000000000..57a5d02179ca --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go @@ -0,0 +1,135 @@ +package querier + +import ( + "context" + "sort" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/prom1/storage/metric" + "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/util/chunkcompat" +) + +// Distributor is the read interface to the distributor, made an interface here +// to reduce package coupling. +type Distributor interface { + Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) + QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*client.QueryStreamResponse, error) + LabelValuesForLabelName(context.Context, model.LabelName) ([]string, error) + LabelNames(context.Context) ([]string, error) + MetricsForLabelMatchers(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]metric.Metric, error) +} + +func newDistributorQueryable(distributor Distributor, streaming bool, iteratorFn chunkIteratorFunc) storage.Queryable { + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return &distributorQuerier{ + distributor: distributor, + ctx: ctx, + mint: mint, + maxt: maxt, + streaming: streaming, + chunkIterFn: iteratorFn, + }, nil + }) +} + +type distributorQuerier struct { + distributor Distributor + ctx context.Context + mint, maxt int64 + streaming bool + chunkIterFn chunkIteratorFunc +} + +func (q *distributorQuerier) SelectSorted(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + // Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation, + // which needs only metadata. + if sp == nil { + ms, err := q.distributor.MetricsForLabelMatchers(q.ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) + if err != nil { + return nil, nil, err + } + return series.MetricsToSeriesSet(ms), nil, nil + } + + mint, maxt := sp.Start, sp.End + + if q.streaming { + return q.streamingSelect(*sp, matchers) + } + + matrix, err := q.distributor.Query(q.ctx, model.Time(mint), model.Time(maxt), matchers...) 
+ if err != nil { + return nil, nil, promql.ErrStorage{Err: err} + } + + // Using MatrixToSeriesSet (and in turn NewConcreteSeriesSet), sorts the series. + return series.MatrixToSeriesSet(matrix), nil, nil +} + +func (q *distributorQuerier) Select(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + return q.SelectSorted(sp, matchers...) +} + +func (q *distributorQuerier) streamingSelect(sp storage.SelectParams, matchers []*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + userID, err := user.ExtractOrgID(q.ctx) + if err != nil { + return nil, nil, promql.ErrStorage{Err: err} + } + + mint, maxt := sp.Start, sp.End + + results, err := q.distributor.QueryStream(q.ctx, model.Time(mint), model.Time(maxt), matchers...) + if err != nil { + return nil, nil, promql.ErrStorage{Err: err} + } + + if len(results.Timeseries) != 0 { + return newTimeSeriesSeriesSet(results.Timeseries), nil, nil + } + + serieses := make([]storage.Series, 0, len(results.Chunkseries)) + for _, result := range results.Chunkseries { + // Sometimes the ingester can send series that have no data. + if len(result.Chunks) == 0 { + continue + } + + ls := client.FromLabelAdaptersToLabels(result.Labels) + sort.Sort(ls) + + chunks, err := chunkcompat.FromChunks(userID, ls, result.Chunks) + if err != nil { + return nil, nil, promql.ErrStorage{Err: err} + } + + series := &chunkSeries{ + labels: ls, + chunks: chunks, + chunkIteratorFunc: q.chunkIterFn, + } + serieses = append(serieses, series) + } + + return series.NewConcreteSeriesSet(serieses), nil, nil +} + +func (q *distributorQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + lv, err := q.distributor.LabelValuesForLabelName(q.ctx, model.LabelName(name)) + return lv, nil, err +} + +func (q *distributorQuerier) LabelNames() ([]string, storage.Warnings, error) { + ln, err := q.distributor.LabelNames(q.ctx) + return ln, nil, err +} + +func (q *distributorQuerier) Close() error { + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go b/vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go new file mode 100644 index 000000000000..609eebe853b3 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/dummy.go @@ -0,0 +1,43 @@ +package querier + +import ( + "net/url" + + "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/scrape" +) + +// DummyTargetRetriever implements github.com/prometheus/prometheus/web/api/v1.targetRetriever. +type DummyTargetRetriever struct{} + +// TargetsActive implements targetRetriever. +func (DummyTargetRetriever) TargetsActive() map[string][]*scrape.Target { + return map[string][]*scrape.Target{} +} + +// TargetsDropped implements targetRetriever. +func (DummyTargetRetriever) TargetsDropped() map[string][]*scrape.Target { + return map[string][]*scrape.Target{} +} + +// DummyAlertmanagerRetriever implements AlertmanagerRetriever. +type DummyAlertmanagerRetriever struct{} + +// Alertmanagers implements AlertmanagerRetriever. +func (DummyAlertmanagerRetriever) Alertmanagers() []*url.URL { return nil } + +// DroppedAlertmanagers implements AlertmanagerRetriever. +func (DummyAlertmanagerRetriever) DroppedAlertmanagers() []*url.URL { return nil } + +// DummyRulesRetriever implements RulesRetriever. +type DummyRulesRetriever struct{} + +// RuleGroups implements RulesRetriever. +func (DummyRulesRetriever) RuleGroups() []*rules.Group { + return nil +} + +// AlertingRules implements RulesRetriever. 
+func (DummyRulesRetriever) AlertingRules() []*rules.AlertingRule { + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go b/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go new file mode 100644 index 000000000000..0f8f7347e164 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_iterator.go @@ -0,0 +1,64 @@ +package iterators + +import ( + "github.com/prometheus/common/model" + + "github.com/cortexproject/cortex/pkg/chunk" + promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" +) + +type chunkIterator struct { + chunk.Chunk + it promchunk.Iterator + + // At() is called often in the heap code, so caching its result seems like + // a good idea. + cacheValid bool + cachedTime int64 + cachedValue float64 +} + +// Seek advances the iterator forward to the value at or after +// the given timestamp. +func (i *chunkIterator) Seek(t int64) bool { + i.cacheValid = false + + // We assume seeks only care about a specific window; if this chunk doesn't + // contain samples in that window, we can shortcut. + if int64(i.Through) < t { + return false + } + + return i.it.FindAtOrAfter(model.Time(t)) +} + +func (i *chunkIterator) AtTime() int64 { + if i.cacheValid { + return i.cachedTime + } + + v := i.it.Value() + i.cachedTime, i.cachedValue = int64(v.Timestamp), float64(v.Value) + i.cacheValid = true + return i.cachedTime +} + +func (i *chunkIterator) At() (int64, float64) { + if i.cacheValid { + return i.cachedTime, i.cachedValue + } + + v := i.it.Value() + i.cachedTime, i.cachedValue = int64(v.Timestamp), float64(v.Value) + i.cacheValid = true + return i.cachedTime, i.cachedValue +} + +func (i *chunkIterator) Next() bool { + i.cacheValid = false + return i.it.Scan() +} + +func (i *chunkIterator) Err() error { + return i.it.Err() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go b/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go new file mode 100644 index 000000000000..9ef70ad7d864 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/iterators/chunk_merge_iterator.go @@ -0,0 +1,208 @@ +package iterators + +import ( + "container/heap" + "sort" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/storage" + + "github.com/cortexproject/cortex/pkg/chunk" +) + +type chunkMergeIterator struct { + its []*nonOverlappingIterator + h seriesIteratorHeap + + currTime int64 + currValue float64 + currErr error +} + +// NewChunkMergeIterator creates a storage.SeriesIterator for a set of chunks. +func NewChunkMergeIterator(cs []chunk.Chunk, _, _ model.Time) storage.SeriesIterator { + its := buildIterators(cs) + c := &chunkMergeIterator{ + currTime: -1, + its: its, + h: make(seriesIteratorHeap, 0, len(its)), + } + + for _, iter := range c.its { + if iter.Next() { + c.h = append(c.h, iter) + continue + } + + if err := iter.Err(); err != nil { + c.currErr = err + } + } + + heap.Init(&c.h) + return c +} + +// Build a list of lists of non-overlapping chunk iterators. 
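+// Chunks are first sorted by From, then greedily assigned to the first list whose last
+// chunk ends before the next chunk starts; e.g. the ranges [0,5], [3,8], [6,10] are
+// split into the lists {[0,5], [6,10]} and {[3,8]}.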
+func buildIterators(cs []chunk.Chunk) []*nonOverlappingIterator {
+	chunks := make([]*chunkIterator, len(cs))
+	for i := range cs {
+		chunks[i] = &chunkIterator{
+			Chunk: cs[i],
+			it:    cs[i].Data.NewIterator(nil),
+		}
+	}
+	sort.Sort(byFrom(chunks))
+
+	chunkLists := [][]*chunkIterator{}
+outer:
+	for _, chunk := range chunks {
+		for i, chunkList := range chunkLists {
+			if chunkList[len(chunkList)-1].Through.Before(chunk.From) {
+				chunkLists[i] = append(chunkLists[i], chunk)
+				continue outer
+			}
+		}
+		chunkLists = append(chunkLists, []*chunkIterator{chunk})
+	}
+
+	its := make([]*nonOverlappingIterator, 0, len(chunkLists))
+	for _, chunkList := range chunkLists {
+		its = append(its, newNonOverlappingIterator(chunkList))
+	}
+	return its
+}
+
+func (c *chunkMergeIterator) Seek(t int64) bool {
+	c.h = c.h[:0]
+
+	for _, iter := range c.its {
+		if iter.Seek(t) {
+			c.h = append(c.h, iter)
+			continue
+		}
+
+		if err := iter.Err(); err != nil {
+			c.currErr = err
+			return false
+		}
+	}
+
+	heap.Init(&c.h)
+
+	if len(c.h) > 0 {
+		c.currTime, c.currValue = c.h[0].At()
+		return true
+	}
+
+	return false
+}
+
+func (c *chunkMergeIterator) Next() bool {
+	if len(c.h) == 0 {
+		return false
+	}
+
+	lastTime := c.currTime
+	for c.currTime == lastTime && len(c.h) > 0 {
+		c.currTime, c.currValue = c.h[0].At()
+
+		if c.h[0].Next() {
+			heap.Fix(&c.h, 0)
+			continue
+		}
+
+		iter := heap.Pop(&c.h).(storage.SeriesIterator)
+		if err := iter.Err(); err != nil {
+			c.currErr = err
+			return false
+		}
+	}
+
+	return c.currTime != lastTime
+}
+
+func (c *chunkMergeIterator) At() (t int64, v float64) {
+	return c.currTime, c.currValue
+}
+
+func (c *chunkMergeIterator) Err() error {
+	return c.currErr
+}
+
+type extraIterator interface {
+	storage.SeriesIterator
+	AtTime() int64
+}
+
+type seriesIteratorHeap []extraIterator
+
+func (h *seriesIteratorHeap) Len() int      { return len(*h) }
+func (h *seriesIteratorHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }
+
+func (h *seriesIteratorHeap) Less(i, j int) bool {
+	iT := (*h)[i].AtTime()
+	jT := (*h)[j].AtTime()
+	return iT < jT
+}
+
+func (h *seriesIteratorHeap) Push(x interface{}) {
+	*h = append(*h, x.(extraIterator))
+}
+
+func (h *seriesIteratorHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
+
+type byFrom []*chunkIterator
+
+func (b byFrom) Len() int           { return len(b) }
+func (b byFrom) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byFrom) Less(i, j int) bool { return b[i].From < b[j].From }
+
+type nonOverlappingIterator struct {
+	curr   int
+	chunks []*chunkIterator
+}
+
+// newNonOverlappingIterator returns a single iterator over a slice of sorted,
+// non-overlapping iterators.
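+// It relies on the invariant established by buildIterators: within one list, chunk i+1
+// starts only after chunk i has ended, so Next() can simply fall through to the next chunk.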
+func newNonOverlappingIterator(chunks []*chunkIterator) *nonOverlappingIterator {
+	return &nonOverlappingIterator{
+		chunks: chunks,
+	}
+}
+
+func (it *nonOverlappingIterator) Seek(t int64) bool {
+	for ; it.curr < len(it.chunks); it.curr++ {
+		if it.chunks[it.curr].Seek(t) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (it *nonOverlappingIterator) Next() bool {
+	for it.curr < len(it.chunks) && !it.chunks[it.curr].Next() {
+		it.curr++
+	}
+
+	return it.curr < len(it.chunks)
+}
+
+func (it *nonOverlappingIterator) AtTime() int64 {
+	return it.chunks[it.curr].AtTime()
+}
+
+func (it *nonOverlappingIterator) At() (int64, float64) {
+	return it.chunks[it.curr].At()
+}
+
+func (it *nonOverlappingIterator) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go b/vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go
new file mode 100644
index 000000000000..8ac963ad5ace
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/matrix.go
@@ -0,0 +1,25 @@
+package querier
+
+import (
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/storage"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/querier/series"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+func mergeChunks(chunks []chunk.Chunk, from, through model.Time) storage.SeriesIterator {
+	samples := make([][]model.SamplePair, 0, len(chunks))
+	for _, c := range chunks {
+		ss, err := c.Samples(from, through)
+		if err != nil {
+			return series.NewErrIterator(err)
+		}
+
+		samples = append(samples, ss)
+	}
+
+	merged := util.MergeNSampleSets(samples...)
+	return series.NewConcreteSeriesIterator(series.NewConcreteSeries(nil, merged))
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
new file mode 100644
index 000000000000..bd0cec1545d1
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
@@ -0,0 +1,369 @@
+package querier
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/chunk/purger"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/weaveworks/common/user"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/querier/batch"
+	"github.com/cortexproject/cortex/pkg/querier/chunkstore"
+	"github.com/cortexproject/cortex/pkg/querier/iterators"
+	"github.com/cortexproject/cortex/pkg/querier/lazyquery"
+	"github.com/cortexproject/cortex/pkg/querier/series"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// Config contains the configuration required to create a querier.
+type Config struct {
+	MaxConcurrent        int           `yaml:"max_concurrent"`
+	Timeout              time.Duration `yaml:"timeout"`
+	Iterators            bool          `yaml:"iterators"`
+	BatchIterators       bool          `yaml:"batch_iterators"`
+	IngesterStreaming    bool          `yaml:"ingester_streaming"`
+	MaxSamples           int           `yaml:"max_samples"`
+	QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"`
+
+	// QueryStoreAfter is the time after which queries should also be sent to the store and not just ingesters.
+	QueryStoreAfter    time.Duration `yaml:"query_store_after"`
+	MaxQueryIntoFuture time.Duration `yaml:"max_query_into_future"`
+
+	// The default evaluation interval for the promql engine.
+	// Needs to be configured for subqueries to work, as it is the default
+	// step if not specified.
+	DefaultEvaluationInterval time.Duration `yaml:"default_evaluation_interval"`
+
+	// Directory for ActiveQueryTracker. If empty, ActiveQueryTracker will be disabled and MaxConcurrent will not be applied (!).
+	// ActiveQueryTracker records active queries and, after a crash, logs them on the next startup.
+	// However, we need to use the active query tracker, otherwise we cannot limit the maximum number of
+	// concurrent queries in the PromQL engine.
+	ActiveQueryTrackerDir string `yaml:"active_query_tracker_dir"`
+}
+
+var (
+	errBadLookbackConfigs = errors.New("bad settings, query_store_after >= query_ingesters_within which can result in queries not being sent")
+)
+
+// RegisterFlags adds the flags required to configure this to the given FlagSet.
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, "The maximum number of concurrent queries.")
+	f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, "The timeout for a query.")
+	if f.Lookup("promql.lookback-delta") == nil {
+		f.DurationVar(&promql.LookbackDelta, "promql.lookback-delta", promql.LookbackDelta, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.")
+	}
+	f.BoolVar(&cfg.Iterators, "querier.iterators", false, "Use iterators to execute query, as opposed to fully materialising the series in memory.")
+	f.BoolVar(&cfg.BatchIterators, "querier.batch-iterators", true, "Use batch iterators to execute query, as opposed to fully materialising the series in memory. Takes precedence over the -querier.iterators flag.")
+	f.BoolVar(&cfg.IngesterStreaming, "querier.ingester-streaming", true, "Use streaming RPCs to query ingester.")
+	f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.")
+	f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.")
+	f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.")
+	f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.")
+	f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store.")
+	f.StringVar(&cfg.ActiveQueryTrackerDir, "querier.active-query-tracker-dir", "./active-query-tracker", "Active query tracker monitors active queries, and writes them to the file in given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to empty value disables active query tracker, which also disables -querier.max-concurrent option.")
+}
+
+// Validate the config
+func (cfg *Config) Validate() error {
+
+	// Ensure the config won't create a situation where some queries are sent to
+	// neither the ingesters nor the store.
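+	// For example, with -querier.query-ingesters-within=1h and -querier.query-store-after=2h,
+	// samples between 1h and 2h old would be served by neither the ingesters nor the store.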
+ if cfg.QueryIngestersWithin != 0 && cfg.QueryStoreAfter != 0 { + if cfg.QueryStoreAfter >= cfg.QueryIngestersWithin { + return errBadLookbackConfigs + } + } + + return nil +} + +func getChunksIteratorFunction(cfg Config) chunkIteratorFunc { + if cfg.BatchIterators { + return batch.NewChunkMergeIterator + } else if cfg.Iterators { + return iterators.NewChunkMergeIterator + } + return mergeChunks +} + +func NewChunkStoreQueryable(cfg Config, chunkStore chunkstore.ChunkStore) storage.Queryable { + return newChunkStoreQueryable(chunkStore, getChunksIteratorFunction(cfg)) +} + +// New builds a queryable and promql engine. +func New(cfg Config, distributor Distributor, storeQueryable storage.Queryable, tombstonesLoader *purger.TombstonesLoader, reg prometheus.Registerer) (storage.Queryable, *promql.Engine) { + iteratorFunc := getChunksIteratorFunction(cfg) + + var queryable storage.Queryable + distributorQueryable := newDistributorQueryable(distributor, cfg.IngesterStreaming, iteratorFunc) + queryable = NewQueryable(distributorQueryable, storeQueryable, iteratorFunc, cfg, tombstonesLoader) + + lazyQueryable := storage.QueryableFunc(func(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { + querier, err := queryable.Querier(ctx, mint, maxt) + if err != nil { + return nil, err + } + return lazyquery.NewLazyQuerier(querier), nil + }) + + promql.SetDefaultEvaluationInterval(cfg.DefaultEvaluationInterval) + engine := promql.NewEngine(promql.EngineOpts{ + Logger: util.Logger, + Reg: reg, + ActiveQueryTracker: createActiveQueryTracker(cfg), + MaxSamples: cfg.MaxSamples, + Timeout: cfg.Timeout, + }) + return lazyQueryable, engine +} + +func createActiveQueryTracker(cfg Config) *promql.ActiveQueryTracker { + dir := cfg.ActiveQueryTrackerDir + + if dir != "" { + return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util.Logger) + } + + return nil +} + +// NewQueryable creates a new Queryable for cortex. +func NewQueryable(distributor, store storage.Queryable, chunkIterFn chunkIteratorFunc, cfg Config, tombstonesLoader *purger.TombstonesLoader) storage.Queryable { + return storage.QueryableFunc(func(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + now := time.Now() + + if cfg.MaxQueryIntoFuture > 0 { + maxQueryTime := util.TimeMilliseconds(now.Add(cfg.MaxQueryIntoFuture)) + + if mint > maxQueryTime { + return storage.NoopQuerier(), nil + } + if maxt > maxQueryTime { + maxt = maxQueryTime + } + } + + q := querier{ + ctx: ctx, + mint: mint, + maxt: maxt, + chunkIterFn: chunkIterFn, + tombstonesLoader: tombstonesLoader, + } + + dqr, err := distributor.Querier(ctx, mint, maxt) + if err != nil { + return nil, err + } + + q.metadataQuerier = dqr + + // Include ingester only if maxt is within QueryIngestersWithin w.r.t. current time. + if cfg.QueryIngestersWithin == 0 || maxt >= util.TimeMilliseconds(now.Add(-cfg.QueryIngestersWithin)) { + q.queriers = append(q.queriers, dqr) + } + + // Include store only if mint is within QueryStoreAfter w.r.t current time. 
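+		// In other words, the store is skipped only when the whole requested range is
+		// recent enough to still be fully covered by the ingesters.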
+		if cfg.QueryStoreAfter == 0 || mint <= util.TimeMilliseconds(now.Add(-cfg.QueryStoreAfter)) {
+			cqr, err := store.Querier(ctx, mint, maxt)
+			if err != nil {
+				return nil, err
+			}
+
+			q.queriers = append(q.queriers, cqr)
+		}
+
+		return q, nil
+	})
+}
+
+type querier struct {
+	// used for labels and metadata queries
+	metadataQuerier storage.Querier
+
+	// used for selecting series
+	queriers []storage.Querier
+
+	chunkIterFn chunkIteratorFunc
+	ctx         context.Context
+	mint, maxt  int64
+
+	tombstonesLoader *purger.TombstonesLoader
+}
+
+// SelectSorted implements storage.Querier.
+func (q querier) SelectSorted(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+	// Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation,
+	// which needs only metadata. Here we expect that the metadataQuerier will handle that.
+	// In Cortex it is not feasible to query entire history (with no mint/maxt), so we only ask ingesters and skip
+	// querying the long-term storage.
+	if sp == nil {
+		return q.metadataQuerier.Select(nil, matchers...)
+	}
+
+	userID, err := user.ExtractOrgID(q.ctx)
+	if err != nil {
+		return nil, nil, promql.ErrStorage{Err: err}
+	}
+
+	tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, model.Time(sp.Start), model.Time(sp.End))
+	if err != nil {
+		return nil, nil, promql.ErrStorage{Err: err}
+	}
+
+	if len(q.queriers) == 1 {
+		seriesSet, warning, err := q.queriers[0].Select(sp, matchers...)
+		if err != nil {
+			return nil, warning, err
+		}
+
+		if tombstones.Len() != 0 {
+			seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: model.Time(sp.Start), End: model.Time(sp.End)})
+		}
+
+		return seriesSet, warning, nil
+	}
+
+	sets := make(chan storage.SeriesSet, len(q.queriers))
+	errs := make(chan error, len(q.queriers))
+	for _, querier := range q.queriers {
+		go func(querier storage.Querier) {
+			set, _, err := querier.Select(sp, matchers...)
+			if err != nil {
+				errs <- err
+			} else {
+				sets <- set
+			}
+		}(querier)
+	}
+
+	var result []storage.SeriesSet
+	for range q.queriers {
+		select {
+		case err := <-errs:
+			return nil, nil, err
+		case set := <-sets:
+			result = append(result, set)
+		case <-q.ctx.Done():
+			return nil, nil, q.ctx.Err()
+		}
+	}
+
+	// We have all the sets from different sources (chunks from store, chunks from ingesters,
+	// time series from store and time series from ingesters).
+	// mergeSeriesSets will return a sorted set.
+	seriesSet := q.mergeSeriesSets(result)
+
+	if tombstones.Len() != 0 {
+		seriesSet = series.NewDeletedSeriesSet(seriesSet, tombstones, model.Interval{Start: model.Time(sp.Start), End: model.Time(sp.End)})
+	}
+	return seriesSet, nil, nil
+}
+
+// Select implements storage.Querier.
+func (q querier) Select(sp *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) {
+	return q.SelectSorted(sp, matchers...)
+}
+
+// LabelValues implements storage.Querier.
+func (q querier) LabelValues(name string) ([]string, storage.Warnings, error) {
+	return q.metadataQuerier.LabelValues(name)
+}
+
+func (q querier) LabelNames() ([]string, storage.Warnings, error) {
+	return q.metadataQuerier.LabelNames()
+}
+
+func (querier) Close() error {
+	return nil
+}
+
+func (q querier) mergeSeriesSets(sets []storage.SeriesSet) storage.SeriesSet {
+	// Here we deal with sets that are based on chunks and build a single set from them.
+	// Remaining sets are merged with the chunks-based one using storage.NewMergeSeriesSet.
+
+	otherSets := []storage.SeriesSet(nil)
+	chunks := []chunk.Chunk(nil)
+
+	for _, set := range sets {
+		if !set.Next() {
+			// Nothing in this set. If it has no error, we can ignore it completely.
+			// If there is an error, we report it.
+			err := set.Err()
+			if err != nil {
+				otherSets = append(otherSets, lazyquery.NewErrSeriesSet(err))
+			}
+			continue
+		}
+
+		s := set.At()
+		if sc, ok := s.(SeriesWithChunks); ok {
+			chunks = append(chunks, sc.Chunks()...)
+
+			// Iterate over the remaining series in this set, and store their chunks.
+			// Here we assume that all remaining series in the set are also backed by chunks.
+			// If not, there will be panics.
+			for set.Next() {
+				s = set.At()
+				chunks = append(chunks, s.(SeriesWithChunks).Chunks()...)
+			}
+		} else {
+			// We already called set.Next() once, but we want to return the same result from At()
+			// also to the query engine.
+			otherSets = append(otherSets, &seriesSetWithFirstSeries{set: set, firstSeries: s})
+		}
+	}
+
+	if len(chunks) == 0 {
+		return storage.NewMergeSeriesSet(otherSets, nil)
+	}
+
+	// partitionChunks returns a set with sorted series, so it can be used by NewMergeSeriesSet.
+	chunksSet := partitionChunks(chunks, q.mint, q.maxt, q.chunkIterFn)
+
+	if len(otherSets) == 0 {
+		return chunksSet
+	}
+
+	otherSets = append(otherSets, chunksSet)
+	return storage.NewMergeSeriesSet(otherSets, nil)
+}
+
+// This series set ignores the first 'Next' call and simply returns the cached result
+// to avoid doing the work required to compute it twice.
+type seriesSetWithFirstSeries struct {
+	firstNextCalled bool
+	firstSeries     storage.Series
+	set             storage.SeriesSet
+}
+
+func (pss *seriesSetWithFirstSeries) Next() bool {
+	if pss.firstNextCalled {
+		pss.firstSeries = nil
+		return pss.set.Next()
+	}
+	pss.firstNextCalled = true
+	return true
+}
+
+func (pss *seriesSetWithFirstSeries) At() storage.Series {
+	if pss.firstSeries != nil {
+		return pss.firstSeries
+	}
+	return pss.set.At()
+}
+
+func (pss *seriesSetWithFirstSeries) Err() error {
+	if pss.firstSeries != nil {
+		return nil
+	}
+	return pss.set.Err()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go
new file mode 100644
index 000000000000..8851ed933f60
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go
@@ -0,0 +1,106 @@
+package querier
+
+import (
+	"net/http"
+
+	"github.com/go-kit/kit/log/level"
+	"github.com/prometheus/prometheus/storage"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// Queries are a set of matchers with time ranges - should not get into megabytes
+const maxRemoteReadQuerySize = 1024 * 1024
+
+// RemoteReadHandler handles Prometheus remote read requests.
+func RemoteReadHandler(q storage.Queryable) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Read-Version"))
+
+		ctx := r.Context()
+		var req client.ReadRequest
+		logger := util.WithContext(r.Context(), util.Logger)
+		if _, err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRemoteReadQuerySize, &req, compressionType); err != nil {
+			level.Error(logger).Log("err", err.Error())
+			http.Error(w, err.Error(), http.StatusBadRequest)
+			return
+		}
+
+		// Fetch samples for all queries in parallel.
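+		// Each goroutine sends exactly one value on the errors channel (possibly nil),
+		// so the collection loop below performs one receive per query and cannot block.
+		// A failed query makes the whole request fail, but only after all goroutines
+		// have completed.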
+ resp := client.ReadResponse{ + Results: make([]*client.QueryResponse, len(req.Queries)), + } + errors := make(chan error) + for i, qr := range req.Queries { + go func(i int, qr *client.QueryRequest) { + from, to, matchers, err := client.FromQueryRequest(qr) + if err != nil { + errors <- err + return + } + + querier, err := q.Querier(ctx, int64(from), int64(to)) + if err != nil { + errors <- err + return + } + + params := &storage.SelectParams{ + Start: int64(from), + End: int64(to), + } + seriesSet, _, err := querier.Select(params, matchers...) + if err != nil { + errors <- err + return + } + + resp.Results[i], err = seriesSetToQueryResponse(seriesSet) + errors <- err + }(i, qr) + } + + var lastErr error + for range req.Queries { + err := <-errors + if err != nil { + lastErr = err + } + } + if lastErr != nil { + http.Error(w, lastErr.Error(), http.StatusBadRequest) + return + } + + if err := util.SerializeProtoResponse(w, &resp, compressionType); err != nil { + level.Error(logger).Log("msg", "error sending remote read response", "err", err) + } + }) +} + +func seriesSetToQueryResponse(s storage.SeriesSet) (*client.QueryResponse, error) { + result := &client.QueryResponse{} + + for s.Next() { + series := s.At() + samples := []client.Sample{} + it := series.Iterator() + for it.Next() { + t, v := it.At() + samples = append(samples, client.Sample{ + TimestampMs: t, + Value: v, + }) + } + if err := it.Err(); err != nil { + return nil, err + } + result.Timeseries = append(result.Timeseries, client.TimeSeries{ + Labels: client.FromLabelsToLabelAdapters(series.Labels()), + Samples: samples, + }) + } + + return result, s.Err() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go b/vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go new file mode 100644 index 000000000000..fa5d8c7ddddd --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/series_with_chunks.go @@ -0,0 +1,15 @@ +package querier + +import ( + "github.com/prometheus/prometheus/storage" + + "github.com/cortexproject/cortex/pkg/chunk" +) + +// SeriesWithChunks extends storage.Series interface with direct access to Cortex chunks. +type SeriesWithChunks interface { + storage.Series + + // Returns all chunks with series data. 
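+	// mergeSeriesSets (querier.go) uses this to pull raw chunks out of chunk-based
+	// series sets and re-partition them via partitionChunks.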
+	Chunks() []chunk.Chunk
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go b/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go
new file mode 100644
index 000000000000..dd3f936b0b52
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go
@@ -0,0 +1,52 @@
+package querier
+
+import (
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health/grpc_health_v1"
+
+	"github.com/cortexproject/cortex/pkg/ring/client"
+	"github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb"
+	"github.com/cortexproject/cortex/pkg/util/grpcclient"
+)
+
+func NewStoreGatewayClientFactory(cfg grpcclient.Config, reg prometheus.Registerer) client.PoolFactory {
+	requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+		Namespace:   "cortex",
+		Name:        "storegateway_client_request_duration_seconds",
+		Help:        "Time spent executing requests on store-gateway.",
+		Buckets:     prometheus.ExponentialBuckets(0.008, 4, 7),
+		ConstLabels: prometheus.Labels{"client": "querier"},
+	}, []string{"operation", "status_code"})
+
+	return func(addr string) (client.PoolClient, error) {
+		return dialStoreGatewayClient(cfg, addr, requestDuration)
+	}
+}
+
+func dialStoreGatewayClient(cfg grpcclient.Config, addr string, requestDuration *prometheus.HistogramVec) (*storeGatewayClient, error) {
+	opts := []grpc.DialOption{grpc.WithInsecure()}
+	opts = append(opts, cfg.DialOption(grpcclient.Instrument(requestDuration))...)
+	conn, err := grpc.Dial(addr, opts...)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to dial store-gateway %s", addr)
+	}
+
+	return &storeGatewayClient{
+		StoreGatewayClient: storegatewaypb.NewStoreGatewayClient(conn),
+		HealthClient:       grpc_health_v1.NewHealthClient(conn),
+		conn:               conn,
+	}, nil
+}
+
+type storeGatewayClient struct {
+	storegatewaypb.StoreGatewayClient
+	grpc_health_v1.HealthClient
+	conn *grpc.ClientConn
+}
+
+func (c *storeGatewayClient) Close() error {
+	return c.conn.Close()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go
new file mode 100644
index 000000000000..080c26172700
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/timeseries_series_set.go
@@ -0,0 +1,99 @@
+package querier
+
+import (
+	"sort"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+)
+
+// timeSeriesSeriesSet is a wrapper around a client.TimeSeries slice to implement the SeriesSet interface
+type timeSeriesSeriesSet struct {
+	ts []client.TimeSeries
+	i  int
+}
+
+func newTimeSeriesSeriesSet(series []client.TimeSeries) *timeSeriesSeriesSet {
+	sort.Sort(byTimeSeriesLabels(series))
+	return &timeSeriesSeriesSet{
+		ts: series,
+		i:  -1,
+	}
+}
+
+// Next implements SeriesSet interface
+func (t *timeSeriesSeriesSet) Next() bool { t.i++; return t.i < len(t.ts) }
+
+// At implements SeriesSet interface
+func (t *timeSeriesSeriesSet) At() storage.Series {
+	if t.i < 0 {
+		return nil
+	}
+	return &timeseries{series: t.ts[t.i]}
+}
+
+// Err implements SeriesSet interface
+func (t *timeSeriesSeriesSet) Err() error { return nil }
+
+// timeseries is a type wrapper that implements the storage.Series interface
+type timeseries struct {
+	series client.TimeSeries
+}
+
+// timeSeriesSeriesIterator is a wrapper around a client.TimeSeries to implement the SeriesIterator interface
+type timeSeriesSeriesIterator struct {
+	ts *timeseries
+	i  int
+}
+
+type byTimeSeriesLabels []client.TimeSeries
+
+func (b byTimeSeriesLabels) Len() int      { return len(b) }
+func (b byTimeSeriesLabels) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byTimeSeriesLabels) Less(i, j int) bool {
+	return labels.Compare(client.FromLabelAdaptersToLabels(b[i].Labels), client.FromLabelAdaptersToLabels(b[j].Labels)) < 0
+}
+
+// Labels implements the storage.Series interface.
+// Conversion is safe because ingester sets these by calling client.FromLabelsToLabelAdapters which guarantees labels are sorted.
+func (t *timeseries) Labels() labels.Labels {
+	return client.FromLabelAdaptersToLabels(t.series.Labels)
+}
+
+// Iterator implements the storage.Series interface
+func (t *timeseries) Iterator() storage.SeriesIterator {
+	return &timeSeriesSeriesIterator{
+		ts: t,
+		i:  -1,
+	}
+}
+
+// Seek implements SeriesIterator interface
+func (t *timeSeriesSeriesIterator) Seek(s int64) bool {
+	offset := 0
+	if t.i > 0 {
+		offset = t.i // only advance via Seek
+	}
+
+	t.i = sort.Search(len(t.ts.series.Samples[offset:]), func(i int) bool {
+		return t.ts.series.Samples[offset+i].TimestampMs >= s
+	}) + offset
+
+	return t.i < len(t.ts.series.Samples)
+}
+
+// At implements the SeriesIterator interface
+func (t *timeSeriesSeriesIterator) At() (int64, float64) {
+	if t.i < 0 || t.i >= len(t.ts.series.Samples) {
+		return 0, 0
+	}
+	return t.ts.series.Samples[t.i].TimestampMs, t.ts.series.Samples[t.i].Value
+}
+
+// Next implements the SeriesIterator interface
+func (t *timeSeriesSeriesIterator) Next() bool { t.i++; return t.i < len(t.ts.series.Samples) }
+
+// Err implements the SeriesIterator interface
+func (t *timeSeriesSeriesIterator) Err() error { return nil }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go
new file mode 100644
index 000000000000..045631f4ac2c
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go
@@ -0,0 +1,509 @@
+package ruler
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/gorilla/mux"
+	"github.com/pkg/errors"
+	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/weaveworks/common/user"
+	"gopkg.in/yaml.v2"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+	rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt"
+	"github.com/cortexproject/cortex/pkg/ruler/rules"
+	store "github.com/cortexproject/cortex/pkg/ruler/rules"
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// In order to reimplement the prometheus rules API, a large amount of code was copied over.
+// This is required because the prometheus api implementation does not pass a context to
+// the rule retrieval function.
+// https://github.com/prometheus/prometheus/blob/2aacd807b3ec6ddd90ae55f3a42f4cffed561ea9/web/api/v1/api.go#L108
+// https://github.com/prometheus/prometheus/pull/4999
+
+type response struct {
+	Status    string       `json:"status"`
+	Data      interface{}  `json:"data"`
+	ErrorType v1.ErrorType `json:"errorType"`
+	Error     string       `json:"error"`
+}
+
+// AlertDiscovery has info for all active alerts.
+type AlertDiscovery struct { + Alerts []*Alert `json:"alerts"` +} + +// Alert has info for an alert. +type Alert struct { + Labels labels.Labels `json:"labels"` + Annotations labels.Labels `json:"annotations"` + State string `json:"state"` + ActiveAt *time.Time `json:"activeAt"` + Value string `json:"value"` +} + +// RuleDiscovery has info for all rules +type RuleDiscovery struct { + RuleGroups []*RuleGroup `json:"groups"` +} + +// RuleGroup has info for rules which are part of a group +type RuleGroup struct { + Name string `json:"name"` + File string `json:"file"` + // In order to preserve rule ordering, while exposing type (alerting or recording) + // specific properties, both alerting and recording rules are exposed in the + // same array. + Rules []rule `json:"rules"` + Interval float64 `json:"interval"` + LastEvaluation time.Time `json:"lastEvaluation"` + EvaluationTime float64 `json:"evaluationTime"` +} + +type rule interface{} + +type alertingRule struct { + // State can be "pending", "firing", "inactive". + State string `json:"state"` + Name string `json:"name"` + Query string `json:"query"` + Duration float64 `json:"duration"` + Labels labels.Labels `json:"labels"` + Annotations labels.Labels `json:"annotations"` + Alerts []*Alert `json:"alerts"` + Health string `json:"health"` + LastError string `json:"lastError"` + Type v1.RuleType `json:"type"` + LastEvaluation time.Time `json:"lastEvaluation"` + EvaluationTime float64 `json:"evaluationTime"` +} + +type recordingRule struct { + Name string `json:"name"` + Query string `json:"query"` + Labels labels.Labels `json:"labels"` + Health string `json:"health"` + LastError string `json:"lastError"` + Type v1.RuleType `json:"type"` + LastEvaluation time.Time `json:"lastEvaluation"` + EvaluationTime float64 `json:"evaluationTime"` +} + +func respondError(logger log.Logger, w http.ResponseWriter, msg string) { + b, err := json.Marshal(&response{ + Status: "error", + ErrorType: v1.ErrServer, + Error: msg, + Data: nil, + }) + + if err != nil { + level.Error(logger).Log("msg", "error marshaling json response", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusInternalServerError) + if n, err := w.Write(b); err != nil { + level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) + } +} + +func (r *Ruler) PrometheusRules(w http.ResponseWriter, req *http.Request) { + logger := util.WithContext(req.Context(), util.Logger) + userID, ctx, err := user.ExtractOrgIDFromHTTPRequest(req) + if err != nil || userID == "" { + level.Error(logger).Log("msg", "error extracting org id from context", "err", err) + respondError(logger, w, "no valid org id found") + return + } + + w.Header().Set("Content-Type", "application/json") + rgs, err := r.GetRules(ctx) + + if err != nil { + respondError(logger, w, err.Error()) + return + } + + groups := make([]*RuleGroup, 0, len(rgs)) + + for _, g := range rgs { + grp := RuleGroup{ + Name: g.Group.Name, + File: g.Group.Namespace, + Rules: make([]rule, len(g.ActiveRules)), + Interval: g.Group.Interval.Seconds(), + LastEvaluation: g.GetEvaluationTimestamp(), + EvaluationTime: g.GetEvaluationDuration().Seconds(), + } + + for i, rl := range g.ActiveRules { + if g.ActiveRules[i].Rule.Alert != "" { + alerts := make([]*Alert, 0, len(rl.Alerts)) + for _, a := range rl.Alerts { + alerts = append(alerts, &Alert{ + Labels: client.FromLabelAdaptersToLabels(a.Labels), + Annotations: client.FromLabelAdaptersToLabels(a.Annotations), + State: 
a.GetState(),
+						ActiveAt:    &a.ActiveAt,
+						Value:       strconv.FormatFloat(a.Value, 'e', -1, 64),
+					})
+				}
+				grp.Rules[i] = alertingRule{
+					State:          rl.GetState(),
+					Name:           rl.Rule.GetAlert(),
+					Query:          rl.Rule.GetExpr(),
+					Duration:       rl.Rule.For.Seconds(),
+					Labels:         client.FromLabelAdaptersToLabels(rl.Rule.Labels),
+					Annotations:    client.FromLabelAdaptersToLabels(rl.Rule.Annotations),
+					Alerts:         alerts,
+					Health:         rl.GetHealth(),
+					LastError:      rl.GetLastError(),
+					LastEvaluation: rl.GetEvaluationTimestamp(),
+					EvaluationTime: rl.GetEvaluationDuration().Seconds(),
+					Type:           v1.RuleTypeAlerting,
+				}
+			} else {
+				grp.Rules[i] = recordingRule{
+					Name:           rl.Rule.GetRecord(),
+					Query:          rl.Rule.GetExpr(),
+					Labels:         client.FromLabelAdaptersToLabels(rl.Rule.Labels),
+					Health:         rl.GetHealth(),
+					LastError:      rl.GetLastError(),
+					LastEvaluation: rl.GetEvaluationTimestamp(),
+					EvaluationTime: rl.GetEvaluationDuration().Seconds(),
+					Type:           v1.RuleTypeRecording,
+				}
+			}
+		}
+		groups = append(groups, &grp)
+	}
+
+	// keep data.groups in order
+	sort.Slice(groups, func(i, j int) bool {
+		return groups[i].File < groups[j].File
+	})
+
+	b, err := json.Marshal(&response{
+		Status: "success",
+		Data:   &RuleDiscovery{RuleGroups: groups},
+	})
+	if err != nil {
+		level.Error(logger).Log("msg", "error marshaling json response", "err", err)
+		respondError(logger, w, "unable to marshal the requested data")
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	if n, err := w.Write(b); err != nil {
+		level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
+	}
+}
+
+func (r *Ruler) PrometheusAlerts(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+	userID, ctx, err := user.ExtractOrgIDFromHTTPRequest(req)
+	if err != nil || userID == "" {
+		level.Error(logger).Log("msg", "error extracting org id from context", "err", err)
+		respondError(logger, w, "no valid org id found")
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	rgs, err := r.GetRules(ctx)
+
+	if err != nil {
+		respondError(logger, w, err.Error())
+		return
+	}
+
+	alerts := []*Alert{}
+
+	for _, g := range rgs {
+		for _, rl := range g.ActiveRules {
+			if rl.Rule.Alert != "" {
+				for _, a := range rl.Alerts {
+					alerts = append(alerts, &Alert{
+						Labels:      client.FromLabelAdaptersToLabels(a.Labels),
+						Annotations: client.FromLabelAdaptersToLabels(a.Annotations),
+						State:       a.GetState(),
+						ActiveAt:    &a.ActiveAt,
+						Value:       strconv.FormatFloat(a.Value, 'e', -1, 64),
+					})
+				}
+			}
+		}
+	}
+
+	b, err := json.Marshal(&response{
+		Status: "success",
+		Data:   &AlertDiscovery{Alerts: alerts},
+	})
+	if err != nil {
+		level.Error(logger).Log("msg", "error marshaling json response", "err", err)
+		respondError(logger, w, "unable to marshal the requested data")
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+	if n, err := w.Write(b); err != nil {
+		level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
+	}
+}
+
+var (
+	// ErrNoNamespace signals that no namespace was specified in the request
+	ErrNoNamespace = errors.New("a namespace must be provided in the request")
+	// ErrNoGroupName signals a group name url parameter was not found
+	ErrNoGroupName = errors.New("a matching group name must be provided in the request")
+	// ErrNoRuleGroups signals the rule group requested does not exist
+	ErrNoRuleGroups = errors.New("no rule groups found")
+	// ErrBadRuleGroup is returned when the provided rule group cannot be unmarshalled
+	ErrBadRuleGroup = errors.New("unable to decode rule group")
+)
+
+// ValidateRuleGroup validates a rulegroup
+func ValidateRuleGroup(g rulefmt.RuleGroup) []error {
+	var errs []error
+	for i, r := range g.Rules {
+		for _, err := range r.Validate() {
+			var ruleName string
+			if r.Alert != "" {
+				ruleName = r.Alert
+			} else {
+				ruleName = r.Record
+			}
+			errs = append(errs, &rulefmt.Error{
+				Group:    g.Name,
+				Rule:     i,
+				RuleName: ruleName,
+				Err:      err,
+			})
+		}
+	}
+
+	return errs
+}
+
+func marshalAndSend(output interface{}, w http.ResponseWriter, logger log.Logger) {
+	d, err := yaml.Marshal(&output)
+	if err != nil {
+		level.Error(logger).Log("msg", "error marshalling yaml rule groups", "err", err)
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/yaml")
+	if _, err := w.Write(d); err != nil {
+		level.Error(logger).Log("msg", "error writing yaml response", "err", err)
+		return
+	}
+}
+
+func respondAccepted(w http.ResponseWriter, logger log.Logger) {
+	b, err := json.Marshal(&response{
+		Status: "success",
+	})
+	if err != nil {
+		level.Error(logger).Log("msg", "error marshaling json response", "err", err)
+		respondError(logger, w, "unable to marshal the requested data")
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+
+	// Return a status accepted because the rule has been stored and queued for polling, but is not currently active
+	w.WriteHeader(http.StatusAccepted)
+	if n, err := w.Write(b); err != nil {
+		level.Error(logger).Log("msg", "error writing response", "bytesWritten", n, "err", err)
+	}
+}
+
+// parseNamespace parses the namespace from the provided set of params, in this
+// api these params are derived from the url path
+func parseNamespace(params map[string]string) (string, error) {
+	namespace, exists := params["namespace"]
+	if !exists {
+		return "", ErrNoNamespace
+	}
+
+	namespace, err := url.PathUnescape(namespace)
+	if err != nil {
+		return "", err
+	}
+
+	return namespace, nil
+}
+
+// parseGroupName parses the group name from the provided set of params, in this
+// api these params are derived from the url path
+func parseGroupName(params map[string]string) (string, error) {
+	groupName, exists := params["groupName"]
+	if !exists {
+		return "", ErrNoGroupName
+	}
+
+	groupName, err := url.PathUnescape(groupName)
+	if err != nil {
+		return "", err
+	}
+
+	return groupName, nil
+}
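parseNamespace and parseGroupName read gorilla/mux path variables, so the handlers only work behind routes that declare `{namespace}` and `{groupName}` placeholders. A minimal sketch of such wiring (the route path here is an illustrative assumption; the actual paths are chosen by the embedding application, not by this package):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// {namespace} and {groupName} become the mux vars that parseNamespace
	// and parseGroupName read; the path itself is illustrative.
	r.HandleFunc("/rules/{namespace}/{groupName}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "namespace=%s group=%s", vars["namespace"], vars["groupName"])
	}).Methods(http.MethodGet)

	// Exercise the route in-process. Path segments arrive percent-encoded,
	// which is why the parse helpers above call url.PathUnescape.
	req := httptest.NewRequest(http.MethodGet, "/rules/prod/node-alerts", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String()) // namespace=prod group=node-alerts
}
```

+// parseRequest parses the incoming request to pull out the userID, rules namespace, and rule group name
+// and returns them in that order. It also allows users to require a namespace or group name and returns
+// an error if they cannot be parsed.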
+func parseRequest(req *http.Request, requireNamespace, requireGroup bool) (string, string, string, error) {
+	userID, err := user.ExtractOrgID(req.Context())
+	if err != nil {
+		return "", "", "", user.ErrNoOrgID
+	}
+
+	vars := mux.Vars(req)
+
+	namespace, err := parseNamespace(vars)
+	if err != nil {
+		if err != ErrNoNamespace || requireNamespace {
+			return "", "", "", err
+		}
+	}
+
+	group, err := parseGroupName(vars)
+	if err != nil {
+		if err != ErrNoGroupName || requireGroup {
+			return "", "", "", err
+		}
+	}
+
+	return userID, namespace, group, nil
+}
+
+func (r *Ruler) ListRules(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+
+	userID, namespace, _, err := parseRequest(req, false, false)
+	if err != nil {
+		respondError(logger, w, err.Error())
+		return
+	}
+
+	level.Debug(logger).Log("msg", "retrieving rule groups with namespace", "userID", userID, "namespace", namespace)
+	rgs, err := r.store.ListRuleGroups(req.Context(), userID, namespace)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	level.Debug(logger).Log("msg", "retrieved rule groups from rule store", "userID", userID, "num_namespaces", len(rgs))
+
+	if len(rgs) == 0 {
+		level.Info(logger).Log("msg", "no rule groups found", "userID", userID)
+		http.Error(w, ErrNoRuleGroups.Error(), http.StatusNotFound)
+		return
+	}
+
+	formatted := rgs.Formatted()
+	marshalAndSend(formatted, w, logger)
+}
+
+func (r *Ruler) GetRuleGroup(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+	userID, namespace, groupName, err := parseRequest(req, true, true)
+	if err != nil {
+		respondError(logger, w, err.Error())
+		return
+	}
+
+	rg, err := r.store.GetRuleGroup(req.Context(), userID, namespace, groupName)
+	if err != nil {
+		if err == store.ErrGroupNotFound {
+			http.Error(w, err.Error(), http.StatusNotFound)
+			return
+		}
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	formatted := store.FromProto(rg)
+	marshalAndSend(formatted, w, logger)
+}
+
+func (r *Ruler) CreateRuleGroup(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+	userID, namespace, _, err := parseRequest(req, true, false)
+	if err != nil {
+		respondError(logger, w, err.Error())
+		return
+	}
+
+	payload, err := ioutil.ReadAll(req.Body)
+	if err != nil {
+		level.Error(logger).Log("msg", "unable to read rule group payload", "err", err.Error())
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	level.Debug(logger).Log("msg", "attempting to unmarshal rulegroup", "userID", userID, "group", string(payload))
+
+	rg := rulefmt.RuleGroup{}
+	err = yaml.Unmarshal(payload, &rg)
+	if err != nil {
+		level.Error(logger).Log("msg", "unable to unmarshal rule group payload", "err", err.Error())
+		http.Error(w, ErrBadRuleGroup.Error(), http.StatusBadRequest)
+		return
+	}
+
+	errs := ValidateRuleGroup(rg)
+	if len(errs) > 0 {
+		for _, err := range errs {
+			level.Error(logger).Log("msg", "unable to validate rule group payload", "err", err.Error())
+		}
+		http.Error(w, errs[0].Error(), http.StatusBadRequest)
+		return
+	}
+
+	rgProto := store.ToProto(userID, namespace, rg)
+
+	level.Debug(logger).Log("msg", "attempting to store rulegroup", "userID", userID, "group", rgProto.String())
+	err = r.store.SetRuleGroup(req.Context(), userID, namespace, rgProto)
+	if err != nil {
+		level.Error(logger).Log("msg", "unable to store rule group", "err", err.Error())
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	respondAccepted(w, logger)
+}
+
+func (r *Ruler) DeleteRuleGroup(w http.ResponseWriter, req *http.Request) {
+	logger := util.WithContext(req.Context(), util.Logger)
+
+	userID, namespace, groupName, err := parseRequest(req, true, true)
+	if err != nil {
+		respondError(logger, w, err.Error())
+		return
+	}
+
+	err = r.store.DeleteRuleGroup(req.Context(), userID, namespace, groupName)
+	if err != nil {
+		if err == rules.ErrGroupNotFound {
+			http.Error(w, err.Error(), http.StatusNotFound)
+			return
+		}
+		respondError(logger, w, err.Error())
+		return
+	}
+
+	respondAccepted(w, logger)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
new file mode 100644
index 000000000000..70494c4e6754
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
@@ -0,0 +1,94 @@
+package ruler
+
+import (
+	"context"
+	"time"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/weaveworks/common/user"
+
+	"github.com/cortexproject/cortex/pkg/ingester/client"
+)
+
+// Pusher is an ingester server that accepts pushes.
+type Pusher interface {
+	Push(context.Context, *client.WriteRequest) (*client.WriteResponse, error)
+}
+type appender struct {
+	pusher  Pusher
+	labels  []labels.Labels
+	samples []client.Sample
+	userID  string
+}
+
+func (a *appender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+	a.labels = append(a.labels, l)
+	a.samples = append(a.samples, client.Sample{
+		TimestampMs: t,
+		Value:       v,
+	})
+	return 0, nil
+}
+
+func (a *appender) AddFast(l labels.Labels, ref uint64, t int64, v float64) error {
+	_, err := a.Add(l, t, v)
+	return err
+}
+
+func (a *appender) Commit() error {
+	// Since a.pusher is the distributor, client.ReuseSlice will be called in a.pusher.Push.
+	// We shouldn't call client.ReuseSlice here.
+	_, err := a.pusher.Push(user.InjectOrgID(context.Background(), a.userID), client.ToWriteRequest(a.labels, a.samples, nil, client.RULE))
+	a.labels = nil
+	a.samples = nil
+	return err
+}
+
+func (a *appender) Rollback() error {
+	a.labels = nil
+	a.samples = nil
+	return nil
+}
+
+// tsdb fulfills the storage.Storage interface for the prometheus manager;
+// it allows alerts to be restored by the manager
+type tsdb struct {
+	pusher    Pusher
+	userID    string
+	queryable storage.Queryable
+}
+
+// Appender returns a storage.Appender
+func (t *tsdb) Appender() (storage.Appender, error) {
+	return &appender{
+		pusher: t.pusher,
+		userID: t.userID,
+	}, nil
+}
+
+// Querier returns a new Querier on the storage.
+func (t *tsdb) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) {
+	return t.queryable.Querier(ctx, mint, maxt)
+}
+
+// StartTime returns the oldest timestamp stored in the storage.
+func (t *tsdb) StartTime() (int64, error) {
+	return 0, nil
+}
+
+// Close closes the storage and all its underlying resources.
+func (t *tsdb) Close() error {
+	return nil
+}
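engineQueryFunc below is the entire evaluation-delay mechanism: it decorates the stock Prometheus query function so every rule evaluates `delay` in the past, giving slow samples time to reach the ingesters before rules read them. A standalone sketch of the same decorator shape (the float64 result stands in for promql.Vector; everything else is stdlib):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// queryFunc mirrors the shape of rules.QueryFunc: evaluate qs at time t.
type queryFunc func(ctx context.Context, qs string, t time.Time) (float64, error)

// withDelay returns a queryFunc that evaluates delay in the past.
func withDelay(orig queryFunc, delay time.Duration) queryFunc {
	return func(ctx context.Context, qs string, t time.Time) (float64, error) {
		return orig(ctx, qs, t.Add(-delay))
	}
}

func main() {
	base := func(_ context.Context, qs string, t time.Time) (float64, error) {
		fmt.Println("evaluating", qs, "at", t.UTC().Format(time.RFC3339))
		return 0, nil
	}
	q := withDelay(base, 30*time.Second)
	_, _ = q(context.Background(), "up", time.Unix(1000, 0))
	// evaluating up at 1970-01-01T00:16:10Z (1000s shifted back by 30s)
}
```

+// engineQueryFunc returns a new query function using the rules.EngineQueryFunc function
+// and passing an altered timestamp.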
+func engineQueryFunc(engine *promql.Engine, q storage.Queryable, delay time.Duration) rules.QueryFunc { + orig := rules.EngineQueryFunc(engine, q) + return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { + return orig(ctx, qs, t.Add(-delay)) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt/rulefmt.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt/rulefmt.go new file mode 100644 index 000000000000..4a82712a9a7e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt/rulefmt.go @@ -0,0 +1,216 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rulefmt + +import ( + "context" + "io/ioutil" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + yaml "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/template" +) + +// Error represents semantic errors on parsing rule groups. +type Error struct { + Group string + Rule int + RuleName string + Err error +} + +func (err *Error) Error() string { + return errors.Wrapf(err.Err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error() +} + +// RuleGroups is a set of rule groups that are typically exposed in a file. +type RuleGroups struct { + Groups []RuleGroup `yaml:"groups"` +} + +// Validate validates all rules in the rule groups. +func (g *RuleGroups) Validate() (errs []error) { + set := map[string]struct{}{} + + for _, g := range g.Groups { + if g.Name == "" { + errs = append(errs, errors.Errorf("Groupname should not be empty")) + } + + if _, ok := set[g.Name]; ok { + errs = append( + errs, + errors.Errorf("groupname: \"%s\" is repeated in the same file", g.Name), + ) + } + + set[g.Name] = struct{}{} + + for i, r := range g.Rules { + for _, err := range r.Validate() { + var ruleName string + if r.Alert != "" { + ruleName = r.Alert + } else { + ruleName = r.Record + } + errs = append(errs, &Error{ + Group: g.Name, + Rule: i, + RuleName: ruleName, + Err: err, + }) + } + } + } + + return errs +} + +// RuleGroup is a list of sequentially evaluated recording and alerting rules. +type RuleGroup struct { + Name string `yaml:"name"` + Interval model.Duration `yaml:"interval"` + Rules []Rule `yaml:"rules"` +} + +// Rule describes an alerting or recording rule. +type Rule struct { + Record string `yaml:"record"` + Alert string `yaml:"alert"` + Expr string `yaml:"expr"` + For model.Duration `yaml:"for"` + Labels map[string]string `yaml:"labels"` + Annotations map[string]string `yaml:"annotations"` +} + +// Validate the rule and return a list of encountered errors. 
+func (r *Rule) Validate() (errs []error) { + if r.Record != "" && r.Alert != "" { + errs = append(errs, errors.Errorf("only one of 'record' and 'alert' must be set")) + } + if r.Record == "" && r.Alert == "" { + errs = append(errs, errors.Errorf("one of 'record' or 'alert' must be set")) + } + + if r.Expr == "" { + errs = append(errs, errors.Errorf("field 'expr' must be set in rule")) + } else if _, err := promql.ParseExpr(r.Expr); err != nil { + errs = append(errs, errors.Wrap(err, "could not parse expression")) + } + if r.Record != "" { + if len(r.Annotations) > 0 { + errs = append(errs, errors.Errorf("invalid field 'annotations' in recording rule")) + } + if r.For != 0 { + errs = append(errs, errors.Errorf("invalid field 'for' in recording rule")) + } + if !model.IsValidMetricName(model.LabelValue(r.Record)) { + errs = append(errs, errors.Errorf("invalid recording rule name: %s", r.Record)) + } + } + + for k, v := range r.Labels { + if !model.LabelName(k).IsValid() { + errs = append(errs, errors.Errorf("invalid label name: %s", k)) + } + + if !model.LabelValue(v).IsValid() { + errs = append(errs, errors.Errorf("invalid label value: %s", v)) + } + } + + for k := range r.Annotations { + if !model.LabelName(k).IsValid() { + errs = append(errs, errors.Errorf("invalid annotation name: %s", k)) + } + } + + return append(errs, testTemplateParsing(r)...) +} + +// testTemplateParsing checks if the templates used in labels and annotations +// of the alerting rules are parsed correctly. +func testTemplateParsing(rl *Rule) (errs []error) { + if rl.Alert == "" { + // Not an alerting rule. + return errs + } + + // Trying to parse templates. + tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0) + defs := []string{ + "{{$labels := .Labels}}", + "{{$externalLabels := .ExternalLabels}}", + "{{$value := .Value}}", + } + parseTest := func(text string) error { + tmpl := template.NewTemplateExpander( + context.TODO(), + strings.Join(append(defs, text), ""), + "__alert_"+rl.Alert, + tmplData, + model.Time(timestamp.FromTime(time.Now())), + nil, + nil, + ) + return tmpl.ParseTest() + } + + // Parsing Labels. + for k, val := range rl.Labels { + err := parseTest(val) + if err != nil { + errs = append(errs, errors.Wrapf(err, "label %q", k)) + } + } + + // Parsing Annotations. + for k, val := range rl.Annotations { + err := parseTest(val) + if err != nil { + errs = append(errs, errors.Wrapf(err, "annotation %q", k)) + } + } + + return errs +} + +// Parse parses and validates a set of rules. +func Parse(content []byte) (*RuleGroups, []error) { + var groups RuleGroups + if err := yaml.UnmarshalStrict(content, &groups); err != nil { + return nil, []error{err} + } + return &groups, groups.Validate() +} + +// ParseFile reads and parses rules from a file. 
+func ParseFile(file string) (*RuleGroups, []error) {
+	b, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, []error{errors.Wrap(err, file)}
+	}
+	rgs, errs := Parse(b)
+	for i := range errs {
+		errs[i] = errors.Wrap(errs[i], file)
+	}
+	return rgs, errs
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go
new file mode 100644
index 000000000000..f5c03d2a7f53
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go
@@ -0,0 +1,16 @@
+package ruler
+
+import (
+	"context"
+)
+
+// TransferOut is a noop for the ruler
+func (r *Ruler) TransferOut(ctx context.Context) error {
+	return nil
+}
+
+// Flush triggers a flush of all the work items currently
+// scheduled by the ruler; currently every ruler will
+// query a backend rule store for its rules, so no
+// flush is required.
+func (r *Ruler) Flush() {}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go
new file mode 100644
index 000000000000..de3d100699ee
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/mapper.go
@@ -0,0 +1,111 @@
+package ruler
+
+import (
+	"crypto/md5"
+	"sort"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/spf13/afero"
+	"gopkg.in/yaml.v2"
+
+	legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt"
+)
+
+// mapper is designed to ensure the provided rule sets are identical
+// to the on-disk rules tracked by the prometheus manager
+type mapper struct {
+	Path string // Path specifies the directory in which rule files will be mapped.
+
+	FS     afero.Fs
+	logger log.Logger
+}
+
+func newMapper(path string, logger log.Logger) *mapper {
+	return &mapper{
+		Path:   path,
+		FS:     afero.NewOsFs(),
+		logger: logger,
+	}
+}
+
+func (m *mapper) MapRules(user string, ruleConfigs map[string][]legacy_rulefmt.RuleGroup) (bool, []string, error) {
+	anyUpdated := false
+	filenames := []string{}
+
+	// user rule files will be stored as `<path>/<userID>/<filename>`
+	path := m.Path + "/" + user + "/"
+	err := m.FS.MkdirAll(path, 0777)
+	if err != nil {
+		return false, nil, err
+	}
+
+	// write all rule configs to disk
+	for filename, groups := range ruleConfigs {
+		fullFileName := path + filename
+
+		fileUpdated, err := m.writeRuleGroupsIfNewer(groups, fullFileName)
+		if err != nil {
+			return false, nil, err
+		}
+		filenames = append(filenames, fullFileName)
+		anyUpdated = anyUpdated || fileUpdated
+	}
+
+	// and clean up any that shouldn't exist
+	existingFiles, err := afero.ReadDir(m.FS, path)
+	if err != nil {
+		return false, nil, err
+	}
+
+	for _, existingFile := range existingFiles {
+		fullFileName := path + existingFile.Name()
+		ruleGroups := ruleConfigs[existingFile.Name()]
+
+		if ruleGroups == nil {
+			err = m.FS.Remove(fullFileName)
+			if err != nil {
+				level.Warn(m.logger).Log("msg", "unable to remove rule file on disk", "file", fullFileName, "err", err)
+			}
+			anyUpdated = true
+		}
+	}
+
+	return anyUpdated, filenames, nil
+}
+
+func (m *mapper) writeRuleGroupsIfNewer(groups []legacy_rulefmt.RuleGroup, filename string) (bool, error) {
+	sort.Slice(groups, func(i, j int) bool {
+		return groups[i].Name > groups[j].Name
+	})
+
+	rgs := legacy_rulefmt.RuleGroups{Groups: groups}
+
+	d, err := yaml.Marshal(&rgs)
+	if err != nil {
+		return false, err
+	}
+
+	_, err = m.FS.Stat(filename)
+	if err == nil {
+		current, err := afero.ReadFile(m.FS, filename)
+		if err != nil {
+			return false, err
+		}
+		newHash := md5.New()
+		currentHash := md5.New()
+
+		// bail out if there is no update
+		if string(currentHash.Sum(current)) == string(newHash.Sum(d)) {
+			return false, nil
+		}
+	}
+
+	level.Info(m.logger).Log("msg", "updating rule file", "file", filename)
+	err = afero.WriteFile(m.FS, filename, d, 0777)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go
new file mode 100644
index 000000000000..fb36d7e8089a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go
@@ -0,0 +1,141 @@
+package ruler
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+
+	gklog "github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery"
+	sd_config "github.com/prometheus/prometheus/discovery/config"
+	"github.com/prometheus/prometheus/discovery/dns"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/notifier"
+)
+
+// rulerNotifier bundles a notifier.Manager together with an associated
+// Alertmanager service discovery manager and handles the lifecycle
+// of both actors.
+type rulerNotifier struct {
+	notifier  *notifier.Manager
+	sdCancel  context.CancelFunc
+	sdManager *discovery.Manager
+	wg        sync.WaitGroup
+	logger    gklog.Logger
+}
+
+func newRulerNotifier(o *notifier.Options, l gklog.Logger) *rulerNotifier {
+	sdCtx, sdCancel := context.WithCancel(context.Background())
+	return &rulerNotifier{
+		notifier:  notifier.NewManager(o, l),
+		sdCancel:  sdCancel,
+		sdManager: discovery.NewManager(sdCtx, l),
+		logger:    l,
+	}
+}
+
+func (rn *rulerNotifier) run() {
+	rn.wg.Add(2)
+	go func() {
+		if err := rn.sdManager.Run(); err != nil {
+			level.Error(rn.logger).Log("msg", "error starting notifier discovery manager", "err", err)
+		}
+		rn.wg.Done()
+	}()
+	go func() {
+		rn.notifier.Run(rn.sdManager.SyncCh())
+		rn.wg.Done()
+	}()
+}
+
+func (rn *rulerNotifier) applyConfig(cfg *config.Config) error {
+	if err := rn.notifier.ApplyConfig(cfg); err != nil {
+		return err
+	}
+
+	sdCfgs := make(map[string]sd_config.ServiceDiscoveryConfig)
+	for k, v := range cfg.AlertingConfig.AlertmanagerConfigs.ToMap() {
+		sdCfgs[k] = v.ServiceDiscoveryConfig
+	}
+	return rn.sdManager.ApplyConfig(sdCfgs)
+}
+
+func (rn *rulerNotifier) stop() {
+	rn.sdCancel()
+	rn.notifier.Stop()
+	rn.wg.Wait()
+}
+
+// Builds a Prometheus config.Config from a ruler.Config with just the required
+// options to configure notifications to Alertmanager.
+func buildNotifierConfig(rulerConfig *Config) (*config.Config, error) {
+	if rulerConfig.AlertmanagerURL.URL == nil {
+		return &config.Config{}, nil
+	}
+
+	u := rulerConfig.AlertmanagerURL
+	var sdConfig sd_config.ServiceDiscoveryConfig
+	if rulerConfig.AlertmanagerDiscovery {
+		if !strings.Contains(u.Host, "_tcp.") {
+			return nil, fmt.Errorf("when alertmanager-discovery is on, host name must be of the form _portname._tcp.service.fqdn (is %q)", u.Host)
+		}
+		dnsSDConfig := dns.SDConfig{
+			Names:           []string{u.Host},
+			RefreshInterval: model.Duration(rulerConfig.AlertmanagerRefreshInterval),
+			Type:            "SRV",
+			Port:            0, // Ignored, because of SRV.
+ } + sdConfig = sd_config.ServiceDiscoveryConfig{ + DNSSDConfigs: []*dns.SDConfig{&dnsSDConfig}, + } + } else { + sdConfig = sd_config.ServiceDiscoveryConfig{ + StaticConfigs: []*targetgroup.Group{ + { + Targets: []model.LabelSet{ + { + model.AddressLabel: model.LabelValue(u.Host), + }, + }, + }, + }, + } + } + + amConfig := &config.AlertmanagerConfig{ + APIVersion: config.AlertmanagerAPIVersionV1, + Scheme: u.Scheme, + PathPrefix: u.Path, + Timeout: model.Duration(rulerConfig.NotificationTimeout), + ServiceDiscoveryConfig: sdConfig, + } + + if rulerConfig.AlertmanangerEnableV2API { + amConfig.APIVersion = config.AlertmanagerAPIVersionV2 + } + + promConfig := &config.Config{ + AlertingConfig: config.AlertingConfig{ + AlertmanagerConfigs: []*config.AlertmanagerConfig{amConfig}, + }, + } + + if u.User != nil { + amConfig.HTTPClientConfig = config_util.HTTPClientConfig{ + BasicAuth: &config_util.BasicAuth{ + Username: u.User.Username(), + }, + } + + if password, isSet := u.User.Password(); isSet { + amConfig.HTTPClientConfig.BasicAuth.Password = config_util.Secret(password) + } + } + + return promConfig, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go new file mode 100644 index 000000000000..60b9fbde0e7a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -0,0 +1,638 @@ +package ruler + +import ( + "context" + "flag" + "fmt" + "hash/fnv" + "net/http" + "net/url" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + ot "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/notifier" + "github.com/prometheus/prometheus/promql" + promRules "github.com/prometheus/prometheus/rules" + promStorage "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/strutil" + "github.com/weaveworks/common/user" + "golang.org/x/net/context/ctxhttp" + "google.golang.org/grpc" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ruler/rules" + store "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + ringCheckErrors = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "ruler_ring_check_errors_total", + Help: "Number of errors that have occurred when checking the ring for ownership", + }) + configUpdatesTotal = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "ruler_config_updates_total", + Help: "Total number of config updates triggered by a user", + }, []string{"user"}) + managersTotal = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "ruler_managers_total", + Help: "Total number of managers registered and running in the ruler", + }) +) + +// Config is the configuration for the recording rules server. +type Config struct { + // This is used for template expansion in alerts; must be a valid URL. + ExternalURL flagext.URLValue `yaml:"external_url"` + // How frequently to evaluate rules by default. 
+	EvaluationInterval time.Duration `yaml:"evaluation_interval"`
+	// Delay the evaluation of all rules by a set interval to give a buffer
+	// to metrics that haven't been forwarded to cortex yet.
+	EvaluationDelay time.Duration `yaml:"evaluation_delay_duration"`
+	// How frequently to poll for updated rules.
+	PollInterval time.Duration `yaml:"poll_interval"`
+	// Rule Storage and Polling configuration.
+	StoreConfig RuleStoreConfig `yaml:"storage"`
+	// Path to store rule files for prom manager.
+	RulePath string `yaml:"rule_path"`
+
+	// URL of the Alertmanager to send notifications to.
+	AlertmanagerURL flagext.URLValue `yaml:"alertmanager_url"`
+	// Whether to use DNS SRV records to discover alertmanagers.
+	AlertmanagerDiscovery bool `yaml:"enable_alertmanager_discovery"`
+	// How long to wait between refreshing the list of alertmanagers based on DNS service discovery.
+	AlertmanagerRefreshInterval time.Duration `yaml:"alertmanager_refresh_interval"`
+	// Enables the ruler notifier to use the alertmanager V2 API.
+	AlertmanangerEnableV2API bool `yaml:"enable_alertmanager_v2"`
+	// Capacity of the queue for notifications to be sent to the Alertmanager.
+	NotificationQueueCapacity int `yaml:"notification_queue_capacity"`
+	// HTTP timeout duration when sending notifications to the Alertmanager.
+	NotificationTimeout time.Duration `yaml:"notification_timeout"`
+
+	// Enable sharding rule groups.
+	EnableSharding   bool          `yaml:"enable_sharding"`
+	SearchPendingFor time.Duration `yaml:"search_pending_for"`
+	Ring             RingConfig    `yaml:"ring"`
+	FlushCheckPeriod time.Duration `yaml:"flush_period"`
+
+	EnableAPI bool `yaml:"enable_api"`
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	cfg.StoreConfig.RegisterFlags(f)
+	cfg.Ring.RegisterFlags(f)
+
+	// Deprecated flags that will be maintained to avoid user disruption
+	flagext.DeprecatedFlag(f, "ruler.client-timeout", "This flag has been renamed to ruler.configs.client-timeout")
+	flagext.DeprecatedFlag(f, "ruler.group-timeout", "This flag is no longer functional.")
+	flagext.DeprecatedFlag(f, "ruler.num-workers", "This flag is no longer functional. For increased concurrency, horizontal sharding is recommended")
+
+	cfg.ExternalURL.URL, _ = url.Parse("") // Must be non-nil
+	f.Var(&cfg.ExternalURL, "ruler.external.url", "URL of alerts return path.")
+	f.DurationVar(&cfg.EvaluationInterval, "ruler.evaluation-interval", 1*time.Minute, "How frequently to evaluate rules")
+	f.DurationVar(&cfg.EvaluationDelay, "ruler.evaluation-delay-duration", 0, "Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to cortex.")
+	f.DurationVar(&cfg.PollInterval, "ruler.poll-interval", 1*time.Minute, "How frequently to poll for rule changes")
+	f.Var(&cfg.AlertmanagerURL, "ruler.alertmanager-url", "URL of the Alertmanager to send notifications to.")
+	f.BoolVar(&cfg.AlertmanagerDiscovery, "ruler.alertmanager-discovery", false, "Use DNS SRV records to discover alertmanager hosts.")
+	f.DurationVar(&cfg.AlertmanagerRefreshInterval, "ruler.alertmanager-refresh-interval", 1*time.Minute, "How long to wait between refreshing alertmanager hosts.")
+	f.BoolVar(&cfg.AlertmanangerEnableV2API, "ruler.alertmanager-use-v2", false, "If enabled, requests to alertmanager will utilize the V2 API.")
+	f.IntVar(&cfg.NotificationQueueCapacity, "ruler.notification-queue-capacity", 10000, "Capacity of the queue for notifications to be sent to the Alertmanager.")
+	f.DurationVar(&cfg.NotificationTimeout, "ruler.notification-timeout", 10*time.Second, "HTTP timeout duration when sending notifications to the Alertmanager.")
+	if flag.Lookup("promql.lookback-delta") == nil {
+		flag.DurationVar(&promql.LookbackDelta, "promql.lookback-delta", promql.LookbackDelta, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.")
+	}
+	f.DurationVar(&cfg.SearchPendingFor, "ruler.search-pending-for", 5*time.Minute, "Time to spend searching for a pending ruler when shutting down.")
+	f.BoolVar(&cfg.EnableSharding, "ruler.enable-sharding", false, "Distribute rule evaluation using ring backend")
+	f.DurationVar(&cfg.FlushCheckPeriod, "ruler.flush-period", 1*time.Minute, "Period with which to attempt to flush rule groups.")
+	f.StringVar(&cfg.RulePath, "ruler.rule-path", "/rules", "file path to store temporary rule files for the prometheus rule managers")
+	f.BoolVar(&cfg.EnableAPI, "experimental.ruler.enable-api", false, "Enable the ruler api")
+}
+
+// Ruler evaluates rules.
+type Ruler struct {
+	services.Service
+
+	cfg         Config
+	engine      *promql.Engine
+	queryable   promStorage.Queryable
+	pusher      Pusher
+	alertURL    *url.URL
+	notifierCfg *config.Config
+
+	lifecycler  *ring.Lifecycler
+	ring        *ring.Ring
+	subservices *services.Manager
+
+	store          rules.RuleStore
+	mapper         *mapper
+	userManagerMtx sync.Mutex
+	userManagers   map[string]*promRules.Manager
+
+	// Per-user notifiers with separate queues.
+	notifiersMtx sync.Mutex
+	notifiers    map[string]*rulerNotifier
+
+	registry prometheus.Registerer
+	logger   log.Logger
+}
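For reference, the yaml struct tags above map directly onto a config file. A self-contained sketch of that mapping using a trimmed stand-in struct (the real Config also carries flagext.URLValue and time.Duration fields, which need their own decoding; the field subset here is illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed stand-in for the Config above, keeping only plain fields; it shows
// how the yaml struct tags line up with keys in a config file.
type rulerConfig struct {
	RulePath       string `yaml:"rule_path"`
	EnableSharding bool   `yaml:"enable_sharding"`
	EnableAPI      bool   `yaml:"enable_api"`
}

func main() {
	doc := []byte("rule_path: /rules\nenable_sharding: true\nenable_api: true\n")
	var cfg rulerConfig
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {RulePath:/rules EnableSharding:true EnableAPI:true}
}
```

+// NewRuler creates a new ruler from a distributor and chunk store.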
+func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable, pusher Pusher, reg prometheus.Registerer, logger log.Logger) (*Ruler, error) {
+	ncfg, err := buildNotifierConfig(&cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	ruleStore, err := NewRuleStorage(cfg.StoreConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	ruler := &Ruler{
+		cfg:          cfg,
+		engine:       engine,
+		queryable:    queryable,
+		alertURL:     cfg.ExternalURL.URL,
+		notifierCfg:  ncfg,
+		notifiers:    map[string]*rulerNotifier{},
+		store:        ruleStore,
+		pusher:       pusher,
+		mapper:       newMapper(cfg.RulePath, logger),
+		userManagers: map[string]*promRules.Manager{},
+		registry:     reg,
+		logger:       logger,
+	}
+
+	ruler.Service = services.NewBasicService(ruler.starting, ruler.run, ruler.stopping)
+	return ruler, nil
+}
+
+func (r *Ruler) starting(ctx context.Context) error {
+	// If sharding is enabled, create/join a ring to distribute tokens to
+	// the ruler
+	if r.cfg.EnableSharding {
+		lifecyclerCfg := r.cfg.Ring.ToLifecyclerConfig()
+		var err error
+		r.lifecycler, err = ring.NewLifecycler(lifecyclerCfg, r, "ruler", ring.RulerRingKey, true)
+		if err != nil {
+			return errors.Wrap(err, "failed to initialize ruler's lifecycler")
+		}
+
+		r.ring, err = ring.New(lifecyclerCfg.RingConfig, "ruler", ring.RulerRingKey)
+		if err != nil {
+			return errors.Wrap(err, "failed to initialize ruler's ring")
+		}
+
+		r.subservices, err = services.NewManager(r.lifecycler, r.ring)
+		if err == nil {
+			err = services.StartManagerAndAwaitHealthy(ctx, r.subservices)
+		}
+		return errors.Wrap(err, "failed to start ruler's services")
+	}
+
+	// TODO: ideally, ruler would wait until its queryable is finished starting.
+	return nil
+}
+
+// stopping stops the Ruler.
+// Each function of the ruler is terminated before leaving the ring.
+func (r *Ruler) stopping(_ error) error {
+	r.notifiersMtx.Lock()
+	for _, n := range r.notifiers {
+		n.stop()
+	}
+	r.notifiersMtx.Unlock()
+
+	if r.subservices != nil {
+		// subservices manages ring and lifecycler, if sharding was enabled.
+		_ = services.StopManagerAndAwaitStopped(context.Background(), r.subservices)
+	}
+
+	level.Info(r.logger).Log("msg", "stopping user managers")
+	wg := sync.WaitGroup{}
+	r.userManagerMtx.Lock()
+	for user, manager := range r.userManagers {
+		level.Debug(r.logger).Log("msg", "shutting down user manager", "user", user)
+		wg.Add(1)
+		go func(manager *promRules.Manager, user string) {
+			manager.Stop()
+			wg.Done()
+			level.Debug(r.logger).Log("msg", "user manager shut down", "user", user)
+		}(manager, user)
+	}
+	wg.Wait()
+	r.userManagerMtx.Unlock()
+	level.Info(r.logger).Log("msg", "all user managers stopped")
+	return nil
+}
+
+// sendAlerts implements a rules.NotifyFunc for a Notifier.
+// It filters any non-firing alerts from the input.
+//
+// Copied from Prometheus's main.go.
+func sendAlerts(n *notifier.Manager, externalURL string) promRules.NotifyFunc {
+	return func(ctx context.Context, expr string, alerts ...*promRules.Alert) {
+		var res []*notifier.Alert
+
+		for _, alert := range alerts {
+			// Only send actually firing alerts.
+			if alert.State == promRules.StatePending {
+				continue
+			}
+			a := &notifier.Alert{
+				StartsAt:     alert.FiredAt,
+				Labels:       alert.Labels,
+				Annotations:  alert.Annotations,
+				GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
+			}
+			if !alert.ResolvedAt.IsZero() {
+				a.EndsAt = alert.ResolvedAt
+			}
+			res = append(res, a)
+		}
+
+		if len(alerts) > 0 {
+			n.Send(res...)
+		}
+	}
+}
+
+func (r *Ruler) getOrCreateNotifier(userID string) (*notifier.Manager, error) {
+	r.notifiersMtx.Lock()
+	defer r.notifiersMtx.Unlock()
+
+	n, ok := r.notifiers[userID]
+	if ok {
+		return n.notifier, nil
+	}
+
+	n = newRulerNotifier(&notifier.Options{
+		QueueCapacity: r.cfg.NotificationQueueCapacity,
+		Do: func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+			// Note: The passed-in context comes from the Prometheus notifier
+			// and does *not* contain the userID. So it needs to be added to the context
+			// here before using the context to inject the userID into the HTTP request.
+			ctx = user.InjectOrgID(ctx, userID)
+			if err := user.InjectOrgIDIntoHTTPRequest(ctx, req); err != nil {
+				return nil, err
+			}
+			// Jaeger complains the passed-in context has an invalid span ID, so start a new root span
+			sp := ot.GlobalTracer().StartSpan("notify", ot.Tag{Key: "organization", Value: userID})
+			defer sp.Finish()
+			ctx = ot.ContextWithSpan(ctx, sp)
+			_ = ot.GlobalTracer().Inject(sp.Context(), ot.HTTPHeaders, ot.HTTPHeadersCarrier(req.Header))
+			return ctxhttp.Do(ctx, client, req)
+		},
+	}, util.Logger)
+
+	go n.run()
+
+	// This should never fail, unless there's a programming mistake.
+	if err := n.applyConfig(r.notifierCfg); err != nil {
+		return nil, err
+	}
+
+	r.notifiers[userID] = n
+	return n.notifier, nil
+}
+
+func (r *Ruler) ownsRule(hash uint32) (bool, error) {
+	rlrs, err := r.ring.Get(hash, ring.Read, []ring.IngesterDesc{})
+	if err != nil {
+		level.Warn(r.logger).Log("msg", "error reading ring to verify rule group ownership", "err", err)
+		ringCheckErrors.Inc()
+		return false, err
+	}
+	if rlrs.Ingesters[0].Addr == r.lifecycler.Addr {
+		level.Debug(r.logger).Log("msg", "rule group owned", "owner_addr", rlrs.Ingesters[0].Addr, "addr", r.lifecycler.Addr)
+		return true, nil
+	}
+	level.Debug(r.logger).Log("msg", "rule group not owned, address does not match", "owner_addr", rlrs.Ingesters[0].Addr, "addr", r.lifecycler.Addr)
+	return false, nil
+}
+
+func (r *Ruler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if r.cfg.EnableSharding {
+		r.ring.ServeHTTP(w, req)
+	} else {
+		var unshardedPage = `
+			<!DOCTYPE html>
+			<html>
+				<head>
+					<meta charset="UTF-8">
+					<title>Cortex Ruler Status</title>
+				</head>
+				<body>
+					<h1>Cortex Ruler Status</h1>
+					<p>Ruler running with shards disabled</p>
+				</body>
+			</html>`
+		w.WriteHeader(http.StatusOK)
+		_, err := w.Write([]byte(unshardedPage))
+		if err != nil {
+			level.Error(r.logger).Log("msg", "unable to serve status page", "err", err)
+		}
+	}
+}
+
+func (r *Ruler) run(ctx context.Context) error {
+	level.Info(r.logger).Log("msg", "ruler up and running")
+
+	tick := time.NewTicker(r.cfg.PollInterval)
+	defer tick.Stop()
+
+	r.loadRules(ctx)
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case <-tick.C:
+			r.loadRules(ctx)
+			r.userManagerMtx.Lock()
+			managersTotal.Set(float64(len(r.userManagers)))
+			r.userManagerMtx.Unlock()
+		}
+	}
+}
+
+func (r *Ruler) loadRules(ctx context.Context) {
+	ringHasher := fnv.New32a()
+
+	configs, err := r.store.ListAllRuleGroups(ctx)
+	if err != nil {
+		level.Error(r.logger).Log("msg", "unable to poll for rules", "err", err)
+		return
+	}
+
+	// Iterate through each user's configuration and determine if the on-disk
+	// configurations need to be updated
+	for user, cfg := range configs {
+		filteredGroups := store.RuleGroupList{}
+
+		// If sharding is enabled, prune the rule group to only contain rules
+		// this ruler is responsible for.
+		if r.cfg.EnableSharding {
+			for _, g := range cfg {
+				id := g.User + "/" + g.Namespace + "/" + g.Name
+				ringHasher.Reset()
+				_, err = ringHasher.Write([]byte(id))
+				if err != nil {
+					level.Error(r.logger).Log("msg", "failed to create group for user", "user", user, "namespace", g.Namespace, "group", g.Name, "err", err)
+					continue
+				}
+				hash := ringHasher.Sum32()
+				owned, err := r.ownsRule(hash)
+				if err != nil {
+					level.Error(r.logger).Log("msg", "unable to verify rule group ownership, will retry on the next poll", "err", err)
+					return
+				}
+				if owned {
+					filteredGroups = append(filteredGroups, g)
+				}
+			}
+		} else {
+			filteredGroups = cfg
+		}
+
+		r.syncManager(ctx, user, filteredGroups)
+	}
+
+	// Check for deleted users and remove them
+	r.userManagerMtx.Lock()
+	defer r.userManagerMtx.Unlock()
+	for user, mngr := range r.userManagers {
+		if _, exists := configs[user]; !exists {
+			go mngr.Stop()
+			delete(r.userManagers, user)
+			level.Info(r.logger).Log("msg", "deleting rule manager", "user", user)
+		}
+	}
+
+}
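The sharding decision in loadRules above reduces to one hash: a rule group's ring token is the FNV-32a digest of "user/namespace/group", and the group is evaluated only if the ring says this ruler owns that token. A standalone sketch of just that step (ruleGroupToken is a hypothetical helper name; the real code inlines this in loadRules):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// ruleGroupToken reproduces the hashing step from loadRules: the ring
// token for a rule group is the FNV-32a hash of "user/namespace/group".
func ruleGroupToken(user, namespace, group string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(user + "/" + namespace + "/" + group))
	return h.Sum32()
}

func main() {
	tok := ruleGroupToken("tenant-1", "prod", "node-alerts")
	// In the real ruler this token is passed to ring.Get, and the group is
	// evaluated only if the first returned instance is this ruler.
	fmt.Println(tok)
}
```

+// syncManager maps the rule files to disk, detects any changes and will create/update
+// the user's Prometheus Rules Manager.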
+func (r *Ruler) syncManager(ctx context.Context, user string, groups store.RuleGroupList) {
+	// A lock is taken to ensure that if syncManager is called concurrently, each call
+	// returns only after the rule files have been mapped and checked for updates
+	r.userManagerMtx.Lock()
+	defer r.userManagerMtx.Unlock()
+
+	// Map the files to disk and return the file names to be passed to the users manager if they
+	// have been updated
+	update, files, err := r.mapper.MapRules(user, groups.Formatted())
+	if err != nil {
+		level.Error(r.logger).Log("msg", "unable to map rule files", "user", user, "err", err)
+		return
+	}
+
+	if update {
+		level.Debug(r.logger).Log("msg", "updating rules", "user", user)
+		configUpdatesTotal.WithLabelValues(user).Inc()
+		manager, exists := r.userManagers[user]
+		if !exists {
+			manager, err = r.newManager(ctx, user)
+			if err != nil {
+				level.Error(r.logger).Log("msg", "unable to create rule manager", "user", user, "err", err)
+				return
+			}
+			manager.Run()
+			r.userManagers[user] = manager
+		}
+		err = manager.Update(r.cfg.EvaluationInterval, files, nil)
+		if err != nil {
+			level.Error(r.logger).Log("msg", "unable to update rule manager", "user", user, "err", err)
+			return
+		}
+	}
+}
+
+// newManager creates a prometheus rule manager wrapped with a user id
+// configured storage, appendable, notifier, and instrumentation
+func (r *Ruler) newManager(ctx context.Context, userID string) (*promRules.Manager, error) {
+	tsdb := &tsdb{
+		pusher:    r.pusher,
+		userID:    userID,
+		queryable: r.queryable,
+	}
+
+	notifier, err := r.getOrCreateNotifier(userID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Wrap registerer with userID and cortex_ prefix
+	reg := prometheus.WrapRegistererWith(prometheus.Labels{"user": userID}, r.registry)
+	reg = prometheus.WrapRegistererWithPrefix("cortex_", reg)
+	logger := log.With(r.logger, "user", userID)
+	opts := &promRules.ManagerOptions{
+		Appendable:  tsdb,
+		TSDB:        tsdb,
+		QueryFunc:   engineQueryFunc(r.engine, r.queryable, r.cfg.EvaluationDelay),
+		Context:     user.InjectOrgID(ctx, userID),
+		ExternalURL: r.alertURL,
+		NotifyFunc:  sendAlerts(notifier, r.alertURL.String()),
+		Logger:      logger,
+		Registerer:  reg,
+	}
+	return promRules.NewManager(opts), nil
+}
+
+// GetRules retrieves the running rules from this ruler and all running rulers in the ring if
+// sharding is enabled
+func (r *Ruler) GetRules(ctx context.Context) ([]*GroupStateDesc, error) {
+	userID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("no user id found in context")
+	}
+
+	if r.cfg.EnableSharding {
+		return r.getShardedRules(ctx)
+	}
+
+	return r.getLocalRules(userID)
+}
+
+func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) {
+	var groups []*promRules.Group
+	r.userManagerMtx.Lock()
+	if mngr, exists := r.userManagers[userID]; exists {
+		groups = mngr.RuleGroups()
+	}
+	r.userManagerMtx.Unlock()
+
+	groupDescs := make([]*GroupStateDesc, 0, len(groups))
+	prefix := filepath.Join(r.cfg.RulePath, userID) + "/"
+
+	for _, group := range groups {
+		interval := group.Interval()
+		groupDesc := &GroupStateDesc{
+			Group: &rules.RuleGroupDesc{
+				Name:      group.Name(),
+				Namespace: strings.TrimPrefix(group.File(), prefix),
+				Interval:  interval,
+				User:      userID,
+			},
+			EvaluationTimestamp: group.GetEvaluationTimestamp(),
+			EvaluationDuration:  group.GetEvaluationDuration(),
+		}
+		for _, r := range group.Rules() {
+			lastError := ""
+			if r.LastError() != nil {
+				lastError = r.LastError().Error()
+			}
+
+			var ruleDesc *RuleStateDesc
+			switch rule := r.(type) {
case *promRules.AlertingRule: + rule.ActiveAlerts() + alerts := []*AlertStateDesc{} + for _, a := range rule.ActiveAlerts() { + alerts = append(alerts, &AlertStateDesc{ + State: a.State.String(), + Labels: client.FromLabelsToLabelAdapters(a.Labels), + Annotations: client.FromLabelsToLabelAdapters(a.Annotations), + Value: a.Value, + ActiveAt: a.ActiveAt, + FiredAt: a.FiredAt, + ResolvedAt: a.ResolvedAt, + LastSentAt: a.LastSentAt, + ValidUntil: a.ValidUntil, + }) + } + ruleDesc = &RuleStateDesc{ + Rule: &rules.RuleDesc{ + Expr: rule.Query().String(), + Alert: rule.Name(), + For: rule.Duration(), + Labels: client.FromLabelsToLabelAdapters(rule.Labels()), + Annotations: client.FromLabelsToLabelAdapters(rule.Annotations()), + }, + State: rule.State().String(), + Health: string(rule.Health()), + LastError: lastError, + Alerts: alerts, + EvaluationTimestamp: rule.GetEvaluationTimestamp(), + EvaluationDuration: rule.GetEvaluationDuration(), + } + case *promRules.RecordingRule: + ruleDesc = &RuleStateDesc{ + Rule: &rules.RuleDesc{ + Record: rule.Name(), + Expr: rule.Query().String(), + Labels: client.FromLabelsToLabelAdapters(rule.Labels()), + }, + Health: string(rule.Health()), + LastError: lastError, + EvaluationTimestamp: rule.GetEvaluationTimestamp(), + EvaluationDuration: rule.GetEvaluationDuration(), + } + default: + return nil, errors.Errorf("failed to assert type of rule '%v'", rule.Name()) + } + groupDesc.ActiveRules = append(groupDesc.ActiveRules, ruleDesc) + } + groupDescs = append(groupDescs, groupDesc) + } + return groupDescs, nil +} + +func (r *Ruler) getShardedRules(ctx context.Context) ([]*GroupStateDesc, error) { + rulers, err := r.ring.GetAll() + if err != nil { + return nil, err + } + + ctx, err = user.InjectIntoGRPCRequest(ctx) + if err != nil { + return nil, fmt.Errorf("unable to inject user ID into grpc request, %v", err) + } + + rgs := []*GroupStateDesc{} + + for _, rlr := range rulers.Ingesters { + conn, err := grpc.Dial(rlr.Addr, grpc.WithInsecure()) + if err != nil { + return nil, err + } + cc := NewRulerClient(conn) + newGrps, err := cc.Rules(ctx, nil) + if err != nil { + return nil, fmt.Errorf("unable to retrieve rules from other rulers, %v", err) + } + rgs = append(rgs, newGrps.Groups...) + } + + return rgs, nil +} + +// Rules implements the rules service +func (r *Ruler) Rules(ctx context.Context, in *RulesRequest) (*RulesResponse, error) { + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, fmt.Errorf("no user id found in context") + } + + groupDescs, err := r.getLocalRules(userID) + if err != nil { + return nil, err + } + + return &RulesResponse{Groups: groupDescs}, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go new file mode 100644 index 000000000000..88d7d5daa301 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.pb.go @@ -0,0 +1,2380 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: ruler.proto + +package ruler + +import ( + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/cortexproject/cortex/pkg/ingester/client" + github_com_cortexproject_cortex_pkg_ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" + rules "github.com/cortexproject/cortex/pkg/ruler/rules" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" + _ "github.com/golang/protobuf/ptypes/timestamp" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type RulesRequest struct { +} + +func (m *RulesRequest) Reset() { *m = RulesRequest{} } +func (*RulesRequest) ProtoMessage() {} +func (*RulesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{0} +} +func (m *RulesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RulesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RulesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RulesRequest.Merge(m, src) +} +func (m *RulesRequest) XXX_Size() int { + return m.Size() +} +func (m *RulesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RulesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RulesRequest proto.InternalMessageInfo + +type RulesResponse struct { + Groups []*GroupStateDesc `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` +} + +func (m *RulesResponse) Reset() { *m = RulesResponse{} } +func (*RulesResponse) ProtoMessage() {} +func (*RulesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{1} +} +func (m *RulesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RulesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RulesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RulesResponse.Merge(m, src) +} +func (m *RulesResponse) XXX_Size() int { + return m.Size() +} +func (m *RulesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RulesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RulesResponse proto.InternalMessageInfo + +func (m *RulesResponse) GetGroups() []*GroupStateDesc { + if m != nil { + return m.Groups + } + return nil +} + +// GroupStateDesc is a proto representation of a cortex rule group +type GroupStateDesc struct { + Group *rules.RuleGroupDesc 
`protobuf:"bytes,1,opt,name=group,proto3" json:"group,omitempty"` + ActiveRules []*RuleStateDesc `protobuf:"bytes,2,rep,name=active_rules,json=activeRules,proto3" json:"active_rules,omitempty"` + EvaluationTimestamp time.Time `protobuf:"bytes,3,opt,name=evaluationTimestamp,proto3,stdtime" json:"evaluationTimestamp"` + EvaluationDuration time.Duration `protobuf:"bytes,4,opt,name=evaluationDuration,proto3,stdduration" json:"evaluationDuration"` +} + +func (m *GroupStateDesc) Reset() { *m = GroupStateDesc{} } +func (*GroupStateDesc) ProtoMessage() {} +func (*GroupStateDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{2} +} +func (m *GroupStateDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupStateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupStateDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GroupStateDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupStateDesc.Merge(m, src) +} +func (m *GroupStateDesc) XXX_Size() int { + return m.Size() +} +func (m *GroupStateDesc) XXX_DiscardUnknown() { + xxx_messageInfo_GroupStateDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupStateDesc proto.InternalMessageInfo + +func (m *GroupStateDesc) GetGroup() *rules.RuleGroupDesc { + if m != nil { + return m.Group + } + return nil +} + +func (m *GroupStateDesc) GetActiveRules() []*RuleStateDesc { + if m != nil { + return m.ActiveRules + } + return nil +} + +func (m *GroupStateDesc) GetEvaluationTimestamp() time.Time { + if m != nil { + return m.EvaluationTimestamp + } + return time.Time{} +} + +func (m *GroupStateDesc) GetEvaluationDuration() time.Duration { + if m != nil { + return m.EvaluationDuration + } + return 0 +} + +// RuleStateDesc is a proto representation of a Prometheus Rule +type RuleStateDesc struct { + Rule *rules.RuleDesc `protobuf:"bytes,1,opt,name=rule,proto3" json:"rule,omitempty"` + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Health string `protobuf:"bytes,3,opt,name=health,proto3" json:"health,omitempty"` + LastError string `protobuf:"bytes,4,opt,name=lastError,proto3" json:"lastError,omitempty"` + Alerts []*AlertStateDesc `protobuf:"bytes,5,rep,name=alerts,proto3" json:"alerts,omitempty"` + EvaluationTimestamp time.Time `protobuf:"bytes,6,opt,name=evaluationTimestamp,proto3,stdtime" json:"evaluationTimestamp"` + EvaluationDuration time.Duration `protobuf:"bytes,7,opt,name=evaluationDuration,proto3,stdduration" json:"evaluationDuration"` +} + +func (m *RuleStateDesc) Reset() { *m = RuleStateDesc{} } +func (*RuleStateDesc) ProtoMessage() {} +func (*RuleStateDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{3} +} +func (m *RuleStateDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleStateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuleStateDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuleStateDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleStateDesc.Merge(m, src) +} +func (m *RuleStateDesc) XXX_Size() int { + return m.Size() +} +func (m *RuleStateDesc) XXX_DiscardUnknown() { + 
xxx_messageInfo_RuleStateDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleStateDesc proto.InternalMessageInfo + +func (m *RuleStateDesc) GetRule() *rules.RuleDesc { + if m != nil { + return m.Rule + } + return nil +} + +func (m *RuleStateDesc) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *RuleStateDesc) GetHealth() string { + if m != nil { + return m.Health + } + return "" +} + +func (m *RuleStateDesc) GetLastError() string { + if m != nil { + return m.LastError + } + return "" +} + +func (m *RuleStateDesc) GetAlerts() []*AlertStateDesc { + if m != nil { + return m.Alerts + } + return nil +} + +func (m *RuleStateDesc) GetEvaluationTimestamp() time.Time { + if m != nil { + return m.EvaluationTimestamp + } + return time.Time{} +} + +func (m *RuleStateDesc) GetEvaluationDuration() time.Duration { + if m != nil { + return m.EvaluationDuration + } + return 0 +} + +type AlertStateDesc struct { + State string `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Labels []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,2,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"labels"` + Annotations []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,3,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"annotations"` + Value float64 `protobuf:"fixed64,4,opt,name=value,proto3" json:"value,omitempty"` + ActiveAt time.Time `protobuf:"bytes,5,opt,name=active_at,json=activeAt,proto3,stdtime" json:"active_at"` + FiredAt time.Time `protobuf:"bytes,6,opt,name=fired_at,json=firedAt,proto3,stdtime" json:"fired_at"` + ResolvedAt time.Time `protobuf:"bytes,7,opt,name=resolved_at,json=resolvedAt,proto3,stdtime" json:"resolved_at"` + LastSentAt time.Time `protobuf:"bytes,8,opt,name=last_sent_at,json=lastSentAt,proto3,stdtime" json:"last_sent_at"` + ValidUntil time.Time `protobuf:"bytes,9,opt,name=valid_until,json=validUntil,proto3,stdtime" json:"valid_until"` +} + +func (m *AlertStateDesc) Reset() { *m = AlertStateDesc{} } +func (*AlertStateDesc) ProtoMessage() {} +func (*AlertStateDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_9ecbec0a4cfddea6, []int{4} +} +func (m *AlertStateDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AlertStateDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AlertStateDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AlertStateDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_AlertStateDesc.Merge(m, src) +} +func (m *AlertStateDesc) XXX_Size() int { + return m.Size() +} +func (m *AlertStateDesc) XXX_DiscardUnknown() { + xxx_messageInfo_AlertStateDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_AlertStateDesc proto.InternalMessageInfo + +func (m *AlertStateDesc) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *AlertStateDesc) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *AlertStateDesc) GetActiveAt() time.Time { + if m != nil { + return m.ActiveAt + } + return time.Time{} +} + +func (m *AlertStateDesc) GetFiredAt() time.Time { + if m != nil { + return m.FiredAt + } + return time.Time{} +} + +func (m *AlertStateDesc) GetResolvedAt() time.Time { + if m != nil { 
+ return m.ResolvedAt + } + return time.Time{} +} + +func (m *AlertStateDesc) GetLastSentAt() time.Time { + if m != nil { + return m.LastSentAt + } + return time.Time{} +} + +func (m *AlertStateDesc) GetValidUntil() time.Time { + if m != nil { + return m.ValidUntil + } + return time.Time{} +} + +func init() { + proto.RegisterType((*RulesRequest)(nil), "ruler.RulesRequest") + proto.RegisterType((*RulesResponse)(nil), "ruler.RulesResponse") + proto.RegisterType((*GroupStateDesc)(nil), "ruler.GroupStateDesc") + proto.RegisterType((*RuleStateDesc)(nil), "ruler.RuleStateDesc") + proto.RegisterType((*AlertStateDesc)(nil), "ruler.AlertStateDesc") +} + +func init() { proto.RegisterFile("ruler.proto", fileDescriptor_9ecbec0a4cfddea6) } + +var fileDescriptor_9ecbec0a4cfddea6 = []byte{ + // 686 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x3d, 0x6f, 0xd3, 0x4e, + 0x1c, 0xf6, 0xa5, 0x75, 0x9a, 0x5c, 0xfa, 0xef, 0x5f, 0x5c, 0x03, 0x32, 0x11, 0xba, 0x54, 0x61, + 0xa9, 0x90, 0xea, 0x48, 0xa5, 0x12, 0x03, 0xe2, 0x25, 0x55, 0x0b, 0x0c, 0x0c, 0xc8, 0x05, 0xd6, + 0xea, 0x92, 0x5c, 0x5d, 0x83, 0xeb, 0x33, 0x77, 0xe7, 0x88, 0x05, 0x89, 0x99, 0xa9, 0x23, 0x33, + 0x13, 0x1f, 0xa5, 0x63, 0xc7, 0x0a, 0xa1, 0x42, 0xdd, 0x85, 0xb1, 0x1f, 0x01, 0xdd, 0x8b, 0x89, + 0x03, 0x45, 0x22, 0xa0, 0x2e, 0x96, 0x7f, 0x2f, 0xcf, 0xf3, 0xdc, 0x3d, 0xbf, 0xbb, 0x83, 0x0d, + 0x9e, 0xc5, 0x94, 0xfb, 0x29, 0x67, 0x92, 0x21, 0x57, 0x07, 0xad, 0x95, 0x30, 0x92, 0xbb, 0x59, + 0xdf, 0x1f, 0xb0, 0xbd, 0x6e, 0xc8, 0x42, 0xd6, 0xd5, 0xd5, 0x7e, 0xb6, 0xa3, 0x23, 0x1d, 0xe8, + 0x3f, 0x83, 0x6a, 0xe1, 0x90, 0xb1, 0x30, 0xa6, 0xe3, 0xae, 0x61, 0xc6, 0x89, 0x8c, 0x58, 0x62, + 0xeb, 0xed, 0x9f, 0xeb, 0x32, 0xda, 0xa3, 0x42, 0x92, 0xbd, 0xd4, 0x36, 0xdc, 0x2f, 0xe9, 0x0d, + 0x18, 0x97, 0xf4, 0x75, 0xca, 0xd9, 0x0b, 0x3a, 0x90, 0x36, 0xea, 0xa6, 0x2f, 0xc3, 0x6e, 0x94, + 0x84, 0x54, 0x48, 0xca, 0xbb, 0x83, 0x38, 0xa2, 0x49, 0x51, 0xb2, 0x0c, 0xb7, 0xff, 0x84, 0x41, + 0x6f, 0x4e, 0x7f, 0x85, 0xf9, 0x1a, 0x70, 0x67, 0x01, 0xce, 0x07, 0x2a, 0x0c, 0xe8, 0xab, 0x8c, + 0x0a, 0xd9, 0xb9, 0x0b, 0xff, 0xb3, 0xb1, 0x48, 0x59, 0x22, 0x28, 0x5a, 0x81, 0xd5, 0x90, 0xb3, + 0x2c, 0x15, 0x1e, 0x58, 0x9a, 0x59, 0x6e, 0xac, 0x5e, 0xf6, 0x8d, 0x69, 0x0f, 0x55, 0x72, 0x4b, + 0x12, 0x49, 0x37, 0xa8, 0x18, 0x04, 0xb6, 0xa9, 0xf3, 0xa1, 0x02, 0x17, 0x26, 0x4b, 0xe8, 0x06, + 0x74, 0x75, 0xd1, 0x03, 0x4b, 0x60, 0xb9, 0xb1, 0xda, 0xf4, 0x8d, 0xbe, 0x92, 0xd1, 0x9d, 0x1a, + 0x6f, 0x5a, 0xd0, 0x2d, 0x38, 0x4f, 0x06, 0x32, 0x1a, 0xd1, 0x6d, 0xdd, 0xe4, 0x55, 0xb4, 0x66, + 0xd3, 0x6a, 0x2a, 0xc8, 0x58, 0xb2, 0x61, 0x3a, 0xf5, 0x72, 0xd1, 0x73, 0xb8, 0x48, 0x47, 0x24, + 0xce, 0xb4, 0xf7, 0x4f, 0x0b, 0x8f, 0xbd, 0x19, 0x2d, 0xd9, 0xf2, 0xcd, 0x14, 0xfc, 0x62, 0x0a, + 0xfe, 0x8f, 0x8e, 0xf5, 0xda, 0xc1, 0x71, 0xdb, 0xd9, 0xff, 0xd2, 0x06, 0xc1, 0x79, 0x04, 0x68, + 0x0b, 0xa2, 0x71, 0x7a, 0xc3, 0xce, 0xd6, 0x9b, 0xd5, 0xb4, 0x57, 0x7f, 0xa1, 0x2d, 0x1a, 0x0c, + 0xeb, 0x7b, 0xc5, 0x7a, 0x0e, 0xbc, 0xf3, 0xb9, 0x62, 0x5c, 0x1e, 0x7b, 0x74, 0x1d, 0xce, 0xaa, + 0x2d, 0x5a, 0x8b, 0xfe, 0x2f, 0x59, 0xa4, 0xb7, 0xaa, 0x8b, 0xa8, 0x09, 0x5d, 0xa1, 0x10, 0x5e, + 0x65, 0x09, 0x2c, 0xd7, 0x03, 0x13, 0xa0, 0x2b, 0xb0, 0xba, 0x4b, 0x49, 0x2c, 0x77, 0xf5, 0x66, + 0xeb, 0x81, 0x8d, 0xd0, 0x35, 0x58, 0x8f, 0x89, 0x90, 0x9b, 0x9c, 0x33, 0xae, 0x17, 0x5c, 0x0f, + 0xc6, 0x09, 0x35, 0x56, 0x12, 0x53, 0x2e, 0x85, 0xe7, 0x4e, 0x8c, 0xb5, 0xa7, 0x92, 0xa5, 0xb1, + 0x9a, 0xa6, 0xdf, 0xd9, 0x5b, 0xbd, 0x18, 0x7b, 0xe7, 0xfe, 0xcd, 0xde, 
0x77, 0x2e, 0x5c, 0x98, + 0xdc, 0xc7, 0xd8, 0x3a, 0x50, 0xb6, 0x4e, 0xc0, 0x6a, 0x4c, 0xfa, 0x34, 0x2e, 0xce, 0xd9, 0x25, + 0xdf, 0x5e, 0xac, 0xc7, 0x2a, 0xfb, 0x84, 0x44, 0x7c, 0xfd, 0x91, 0x52, 0xfa, 0x74, 0xdc, 0xfe, + 0x9b, 0x6b, 0x6a, 0x68, 0x7a, 0x43, 0x92, 0x4a, 0xca, 0x03, 0x2b, 0x85, 0xde, 0xc0, 0x06, 0x49, + 0x12, 0x26, 0xf5, 0x5a, 0x85, 0x37, 0x73, 0xf1, 0xca, 0x65, 0x3d, 0xe5, 0x84, 0x72, 0x8c, 0xea, + 0x23, 0x01, 0x02, 0x13, 0xa0, 0x1e, 0xac, 0xdb, 0x7b, 0x47, 0xa4, 0xe7, 0x4e, 0x31, 0xd5, 0x9a, + 0x81, 0xf5, 0x24, 0xba, 0x07, 0x6b, 0x3b, 0x11, 0xa7, 0x43, 0xc5, 0x30, 0xcd, 0xb9, 0x98, 0xd3, + 0xa8, 0x9e, 0x44, 0x9b, 0xb0, 0xc1, 0xa9, 0x60, 0xf1, 0xc8, 0x70, 0xcc, 0x4d, 0xc1, 0x01, 0x0b, + 0x60, 0x4f, 0xa2, 0x07, 0x70, 0x5e, 0x1d, 0xf3, 0x6d, 0x41, 0x13, 0xa9, 0x78, 0x6a, 0xd3, 0xf0, + 0x28, 0xe4, 0x16, 0x4d, 0xa4, 0x59, 0xce, 0x88, 0xc4, 0xd1, 0x70, 0x3b, 0x4b, 0x64, 0x14, 0x7b, + 0xf5, 0x69, 0x68, 0x34, 0xf0, 0x99, 0xc2, 0xad, 0xde, 0x81, 0xae, 0xba, 0xc6, 0x1c, 0xad, 0x99, + 0x1f, 0x81, 0x16, 0x4b, 0xaf, 0x59, 0xf1, 0xee, 0xb6, 0x9a, 0x93, 0x49, 0xf3, 0xf8, 0x76, 0x9c, + 0xf5, 0xb5, 0xc3, 0x13, 0xec, 0x1c, 0x9d, 0x60, 0xe7, 0xec, 0x04, 0x83, 0xb7, 0x39, 0x06, 0x1f, + 0x73, 0x0c, 0x0e, 0x72, 0x0c, 0x0e, 0x73, 0x0c, 0xbe, 0xe6, 0x18, 0x7c, 0xcb, 0xb1, 0x73, 0x96, + 0x63, 0xb0, 0x7f, 0x8a, 0x9d, 0xc3, 0x53, 0xec, 0x1c, 0x9d, 0x62, 0xa7, 0x5f, 0xd5, 0xcb, 0xbb, + 0xf9, 0x3d, 0x00, 0x00, 0xff, 0xff, 0x22, 0x68, 0xe6, 0xe5, 0xe1, 0x06, 0x00, 0x00, +} + +func (this *RulesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RulesRequest) + if !ok { + that2, ok := that.(RulesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *RulesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RulesResponse) + if !ok { + that2, ok := that.(RulesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Groups) != len(that1.Groups) { + return false + } + for i := range this.Groups { + if !this.Groups[i].Equal(that1.Groups[i]) { + return false + } + } + return true +} +func (this *GroupStateDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GroupStateDesc) + if !ok { + that2, ok := that.(GroupStateDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Group.Equal(that1.Group) { + return false + } + if len(this.ActiveRules) != len(that1.ActiveRules) { + return false + } + for i := range this.ActiveRules { + if !this.ActiveRules[i].Equal(that1.ActiveRules[i]) { + return false + } + } + if !this.EvaluationTimestamp.Equal(that1.EvaluationTimestamp) { + return false + } + if this.EvaluationDuration != that1.EvaluationDuration { + return false + } + return true +} +func (this *RuleStateDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RuleStateDesc) + if !ok { + that2, ok := that.(RuleStateDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Rule.Equal(that1.Rule) { + return false + } + if this.State != that1.State { + return 
false + } + if this.Health != that1.Health { + return false + } + if this.LastError != that1.LastError { + return false + } + if len(this.Alerts) != len(that1.Alerts) { + return false + } + for i := range this.Alerts { + if !this.Alerts[i].Equal(that1.Alerts[i]) { + return false + } + } + if !this.EvaluationTimestamp.Equal(that1.EvaluationTimestamp) { + return false + } + if this.EvaluationDuration != that1.EvaluationDuration { + return false + } + return true +} +func (this *AlertStateDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*AlertStateDesc) + if !ok { + that2, ok := that.(AlertStateDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.State != that1.State { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Annotations) != len(that1.Annotations) { + return false + } + for i := range this.Annotations { + if !this.Annotations[i].Equal(that1.Annotations[i]) { + return false + } + } + if this.Value != that1.Value { + return false + } + if !this.ActiveAt.Equal(that1.ActiveAt) { + return false + } + if !this.FiredAt.Equal(that1.FiredAt) { + return false + } + if !this.ResolvedAt.Equal(that1.ResolvedAt) { + return false + } + if !this.LastSentAt.Equal(that1.LastSentAt) { + return false + } + if !this.ValidUntil.Equal(that1.ValidUntil) { + return false + } + return true +} +func (this *RulesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&ruler.RulesRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RulesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&ruler.RulesResponse{") + if this.Groups != nil { + s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GroupStateDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&ruler.GroupStateDesc{") + if this.Group != nil { + s = append(s, "Group: "+fmt.Sprintf("%#v", this.Group)+",\n") + } + if this.ActiveRules != nil { + s = append(s, "ActiveRules: "+fmt.Sprintf("%#v", this.ActiveRules)+",\n") + } + s = append(s, "EvaluationTimestamp: "+fmt.Sprintf("%#v", this.EvaluationTimestamp)+",\n") + s = append(s, "EvaluationDuration: "+fmt.Sprintf("%#v", this.EvaluationDuration)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RuleStateDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&ruler.RuleStateDesc{") + if this.Rule != nil { + s = append(s, "Rule: "+fmt.Sprintf("%#v", this.Rule)+",\n") + } + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "Health: "+fmt.Sprintf("%#v", this.Health)+",\n") + s = append(s, "LastError: "+fmt.Sprintf("%#v", this.LastError)+",\n") + if this.Alerts != nil { + s = append(s, "Alerts: "+fmt.Sprintf("%#v", this.Alerts)+",\n") + } + s = append(s, "EvaluationTimestamp: "+fmt.Sprintf("%#v", this.EvaluationTimestamp)+",\n") + s = append(s, "EvaluationDuration: "+fmt.Sprintf("%#v", this.EvaluationDuration)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *AlertStateDesc) GoString() string { + if this == 
nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&ruler.AlertStateDesc{") + s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "ActiveAt: "+fmt.Sprintf("%#v", this.ActiveAt)+",\n") + s = append(s, "FiredAt: "+fmt.Sprintf("%#v", this.FiredAt)+",\n") + s = append(s, "ResolvedAt: "+fmt.Sprintf("%#v", this.ResolvedAt)+",\n") + s = append(s, "LastSentAt: "+fmt.Sprintf("%#v", this.LastSentAt)+",\n") + s = append(s, "ValidUntil: "+fmt.Sprintf("%#v", this.ValidUntil)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringRuler(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RulerClient is the client API for Ruler service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RulerClient interface { + Rules(ctx context.Context, in *RulesRequest, opts ...grpc.CallOption) (*RulesResponse, error) +} + +type rulerClient struct { + cc *grpc.ClientConn +} + +func NewRulerClient(cc *grpc.ClientConn) RulerClient { + return &rulerClient{cc} +} + +func (c *rulerClient) Rules(ctx context.Context, in *RulesRequest, opts ...grpc.CallOption) (*RulesResponse, error) { + out := new(RulesResponse) + err := c.cc.Invoke(ctx, "/ruler.Ruler/Rules", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RulerServer is the server API for Ruler service. +type RulerServer interface { + Rules(context.Context, *RulesRequest) (*RulesResponse, error) +} + +// UnimplementedRulerServer can be embedded to have forward compatible implementations. 
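+// A minimal embedding sketch (rulerServer is an illustrative name, not part
+// of this package):
+//
+//   type rulerServer struct {
+//     UnimplementedRulerServer
+//   }
+//
+// Any RPC the embedder does not override, such as Rules below, then returns
+// codes.Unimplemented instead of breaking compilation when the service grows.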
+type UnimplementedRulerServer struct { +} + +func (*UnimplementedRulerServer) Rules(ctx context.Context, req *RulesRequest) (*RulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Rules not implemented") +} + +func RegisterRulerServer(s *grpc.Server, srv RulerServer) { + s.RegisterService(&_Ruler_serviceDesc, srv) +} + +func _Ruler_Rules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RulerServer).Rules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ruler.Ruler/Rules", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RulerServer).Rules(ctx, req.(*RulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Ruler_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ruler.Ruler", + HandlerType: (*RulerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Rules", + Handler: _Ruler_Rules_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ruler.proto", +} + +func (m *RulesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RulesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RulesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RulesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RulesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RulesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GroupStateDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupStateDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupStateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.EvaluationDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintRuler(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x22 + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EvaluationTimestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintRuler(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a 
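+  // MarshalToSizedBuffer fills dAtA back to front, so fields are written in
+  // descending field order; each key byte is (field_number<<3)|wire_type,
+  // e.g. 0x1a above is field 3 (evaluationTimestamp) with wire type 2
+  // (length-delimited).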
+ if len(m.ActiveRules) > 0 { + for iNdEx := len(m.ActiveRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ActiveRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuleStateDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleStateDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleStateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.EvaluationDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintRuler(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x3a + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EvaluationTimestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintRuler(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x32 + if len(m.Alerts) > 0 { + for iNdEx := len(m.Alerts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Alerts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.LastError) > 0 { + i -= len(m.LastError) + copy(dAtA[i:], m.LastError) + i = encodeVarintRuler(dAtA, i, uint64(len(m.LastError))) + i-- + dAtA[i] = 0x22 + } + if len(m.Health) > 0 { + i -= len(m.Health) + copy(dAtA[i:], m.Health) + i = encodeVarintRuler(dAtA, i, uint64(len(m.Health))) + i-- + dAtA[i] = 0x1a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintRuler(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x12 + } + if m.Rule != nil { + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlertStateDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlertStateDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AlertStateDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ValidUntil, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ValidUntil):]) + if err7 != nil { + return 0, err7 + } + i -= n7 + i = encodeVarintRuler(dAtA, i, uint64(n7)) + i-- + dAtA[i] = 0x4a + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastSentAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastSentAt):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintRuler(dAtA, i, uint64(n8)) + 
i-- + dAtA[i] = 0x42 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ResolvedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ResolvedAt):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintRuler(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x3a + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.FiredAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.FiredAt):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintRuler(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x32 + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ActiveAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ActiveAt):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintRuler(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x2a + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x21 + } + if len(m.Annotations) > 0 { + for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Annotations[iNdEx].Size() + i -= size + if _, err := m.Annotations[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRuler(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintRuler(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRuler(dAtA []byte, offset int, v uint64) int { + offset -= sovRuler(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RulesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RulesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovRuler(uint64(l)) + } + } + return n +} + +func (m *GroupStateDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovRuler(uint64(l)) + } + if len(m.ActiveRules) > 0 { + for _, e := range m.ActiveRules { + l = e.Size() + n += 1 + l + sovRuler(uint64(l)) + } + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp) + n += 1 + l + sovRuler(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration) + n += 1 + l + sovRuler(uint64(l)) + return n +} + +func (m *RuleStateDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rule != nil { + l = m.Rule.Size() + n += 1 + l + sovRuler(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovRuler(uint64(l)) + } + l = len(m.Health) + if l > 0 { + n += 1 + l + sovRuler(uint64(l)) + } + l = len(m.LastError) + if l > 0 { + n += 1 + l + sovRuler(uint64(l)) + } + if len(m.Alerts) > 0 { + for _, e := range m.Alerts { + l = e.Size() + n += 1 + l + sovRuler(uint64(l)) + } + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EvaluationTimestamp) + n += 1 + l + sovRuler(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.EvaluationDuration) + n 
+= 1 + l + sovRuler(uint64(l)) + return n +} + +func (m *AlertStateDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.State) + if l > 0 { + n += 1 + l + sovRuler(uint64(l)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRuler(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for _, e := range m.Annotations { + l = e.Size() + n += 1 + l + sovRuler(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ActiveAt) + n += 1 + l + sovRuler(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.FiredAt) + n += 1 + l + sovRuler(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ResolvedAt) + n += 1 + l + sovRuler(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.LastSentAt) + n += 1 + l + sovRuler(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ValidUntil) + n += 1 + l + sovRuler(uint64(l)) + return n +} + +func sovRuler(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRuler(x uint64) (n int) { + return sovRuler(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RulesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RulesRequest{`, + `}`, + }, "") + return s +} +func (this *RulesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForGroups := "[]*GroupStateDesc{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(f.String(), "GroupStateDesc", "GroupStateDesc", 1) + "," + } + repeatedStringForGroups += "}" + s := strings.Join([]string{`&RulesResponse{`, + `Groups:` + repeatedStringForGroups + `,`, + `}`, + }, "") + return s +} +func (this *GroupStateDesc) String() string { + if this == nil { + return "nil" + } + repeatedStringForActiveRules := "[]*RuleStateDesc{" + for _, f := range this.ActiveRules { + repeatedStringForActiveRules += strings.Replace(f.String(), "RuleStateDesc", "RuleStateDesc", 1) + "," + } + repeatedStringForActiveRules += "}" + s := strings.Join([]string{`&GroupStateDesc{`, + `Group:` + strings.Replace(fmt.Sprintf("%v", this.Group), "RuleGroupDesc", "rules.RuleGroupDesc", 1) + `,`, + `ActiveRules:` + repeatedStringForActiveRules + `,`, + `EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuleStateDesc) String() string { + if this == nil { + return "nil" + } + repeatedStringForAlerts := "[]*AlertStateDesc{" + for _, f := range this.Alerts { + repeatedStringForAlerts += strings.Replace(f.String(), "AlertStateDesc", "AlertStateDesc", 1) + "," + } + repeatedStringForAlerts += "}" + s := strings.Join([]string{`&RuleStateDesc{`, + `Rule:` + strings.Replace(fmt.Sprintf("%v", this.Rule), "RuleDesc", "rules.RuleDesc", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Health:` + fmt.Sprintf("%v", this.Health) + `,`, + `LastError:` + fmt.Sprintf("%v", this.LastError) + `,`, + `Alerts:` + repeatedStringForAlerts + `,`, + `EvaluationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvaluationTimestamp), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `EvaluationDuration:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.EvaluationDuration), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AlertStateDesc) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AlertStateDesc{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ActiveAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ActiveAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `FiredAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FiredAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `ResolvedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResolvedAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `LastSentAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastSentAt), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `ValidUntil:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ValidUntil), "Timestamp", "timestamp.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringRuler(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RulesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RulesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + 
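+      // postIndex marks the end of this length-delimited field; it is
+      // bounds-checked before the nested GroupStateDesc is decoded from
+      // dAtA[iNdEx:postIndex].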
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, &GroupStateDesc{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupStateDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupStateDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupStateDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &rules.RuleGroupDesc{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActiveRules = append(m.ActiveRules, &RuleStateDesc{}) + if err := m.ActiveRules[len(m.ActiveRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EvaluationTimestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
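+      // the embedded google.protobuf.Timestamp was decoded in place into the
+      // non-nullable EvaluationTimestamp field; advance past it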
iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.EvaluationDuration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleStateDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleStateDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleStateDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rule == nil { + m.Rule = &rules.RuleDesc{} + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Health = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alerts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alerts = append(m.Alerts, &AlertStateDesc{}) + if err := m.Alerts[len(m.Alerts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EvaluationTimestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.EvaluationDuration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
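+  // the whole buffer was consumed without error: the RuleStateDesc is fully decoded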
return nil +} +func (m *AlertStateDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlertStateDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlertStateDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotations = append(m.Annotations, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Annotations[len(m.Annotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ActiveAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FiredAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.FiredAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResolvedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ResolvedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastSentAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.LastSentAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidUntil", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRuler + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRuler + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRuler + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ValidUntil, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRuler(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRuler + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + 
} + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRuler(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRuler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRuler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRuler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRuler + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthRuler + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRuler + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRuler(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthRuler + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRuler = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRuler = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto new file mode 100644 index 000000000000..18f602b89d9e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.proto @@ -0,0 +1,70 @@ +// Ruler Service Representation +// This service is used to retrieve the current state of rules running across +// all Rulers in a cluster. 
It allows Cortex to fully serve the `/api/v1/{rules|alerts}`
+// Prometheus API.
+syntax = "proto3";
+package ruler;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto";
+import "github.com/cortexproject/cortex/pkg/ruler/rules/rules.proto";
+
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service Ruler {
+  rpc Rules(RulesRequest) returns (RulesResponse) {};
+}
+
+message RulesRequest {}
+
+message RulesResponse {
+  repeated GroupStateDesc groups = 1;
+}
+
+// GroupStateDesc is a proto representation of a cortex rule group
+message GroupStateDesc {
+  rules.RuleGroupDesc group = 1;
+  repeated RuleStateDesc active_rules = 2;
+  google.protobuf.Timestamp evaluationTimestamp = 3 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  google.protobuf.Duration evaluationDuration = 4 [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+}
+
+// RuleStateDesc is a proto representation of a Prometheus Rule
+message RuleStateDesc {
+  rules.RuleDesc rule = 1;
+  string state = 2;
+  string health = 3;
+  string lastError = 4;
+  repeated AlertStateDesc alerts = 5;
+  google.protobuf.Timestamp evaluationTimestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  google.protobuf.Duration evaluationDuration = 7 [(gogoproto.nullable) = false, (gogoproto.stdduration) = true];
+}
+
+message AlertStateDesc {
+  string state = 1;
+  repeated cortex.LabelPair labels = 2 [
+    (gogoproto.nullable) = false,
+    (gogoproto.customtype) =
+        "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"
+  ];
+  repeated cortex.LabelPair annotations = 3 [
+    (gogoproto.nullable) = false,
+    (gogoproto.customtype) =
+        "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter"
+  ];
+  double value = 4;
+  google.protobuf.Timestamp active_at = 5
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  google.protobuf.Timestamp fired_at = 6
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  google.protobuf.Timestamp resolved_at = 7
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  google.protobuf.Timestamp last_sent_at = 8
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+  google.protobuf.Timestamp valid_until = 9
+      [(gogoproto.nullable) = false, (gogoproto.stdtime) = true];
+}
\ No newline at end of file
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go
new file mode 100644
index 000000000000..fb45cb2c8b1d
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go
@@ -0,0 +1,93 @@
+package ruler
+
+import (
+  "flag"
+  "os"
+  "time"
+
+  "github.com/go-kit/kit/log/level"
+
+  "github.com/cortexproject/cortex/pkg/ring"
+  "github.com/cortexproject/cortex/pkg/ring/kv"
+  "github.com/cortexproject/cortex/pkg/util"
+  "github.com/cortexproject/cortex/pkg/util/flagext"
+)
+
+// RingConfig masks the ring lifecycler config, which contains
+// many options that the rulers' ring does not need. It strips the
+// config down to the minimum and avoids confusing the user.
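+//
+// A minimal YAML sketch of this block, assuming it is nested under the
+// ruler's ring section (the consul backend is only an example):
+//
+//   ring:
+//     kvstore:
+//       store: consul
+//     heartbeat_period: 5s
+//     heartbeat_timeout: 1m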
+type RingConfig struct {
+	KVStore          kv.Config     `yaml:"kvstore"`
+	HeartbeatPeriod  time.Duration `yaml:"heartbeat_period"`
+	HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"`
+
+	// Instance details
+	InstanceID             string   `yaml:"instance_id" doc:"hidden"`
+	InstanceInterfaceNames []string `yaml:"instance_interface_names" doc:"hidden"`
+	InstancePort           int      `yaml:"instance_port" doc:"hidden"`
+	InstanceAddr           string   `yaml:"instance_addr" doc:"hidden"`
+	NumTokens              int      `yaml:"num_tokens"`
+
+	// Injected internally
+	ListenPort int `yaml:"-"`
+
+	// Used for testing
+	SkipUnregister bool `yaml:"-"`
+}
+
+// RegisterFlags adds the flags required to configure this to the given FlagSet
+func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) {
+	hostname, err := os.Hostname()
+	if err != nil {
+		level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err)
+		os.Exit(1)
+	}
+
+	// Ring flags
+	cfg.KVStore.RegisterFlagsWithPrefix("ruler.ring.", "rulers/", f)
+	f.DurationVar(&cfg.HeartbeatPeriod, "ruler.ring.heartbeat-period", 5*time.Second, "Period at which to heartbeat to the ring.")
+	f.DurationVar(&cfg.HeartbeatTimeout, "ruler.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which rulers are considered unhealthy within the ring.")
+
+	// Instance flags
+	cfg.InstanceInterfaceNames = []string{"eth0", "en0"}
+	f.Var((*flagext.Strings)(&cfg.InstanceInterfaceNames), "ruler.ring.instance-interface", "Name of network interface to read address from.")
+	f.StringVar(&cfg.InstanceAddr, "ruler.ring.instance-addr", "", "IP address to advertise in the ring.")
+	f.IntVar(&cfg.InstancePort, "ruler.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).")
+	f.StringVar(&cfg.InstanceID, "ruler.ring.instance-id", hostname, "Instance ID to register in the ring.")
+	f.IntVar(&cfg.NumTokens, "ruler.ring.num-tokens", 128, "Number of tokens for each ruler.")
+}
+
+// ToLifecyclerConfig returns a LifecyclerConfig based on the ruler
+// ring config.
+func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { + // We have to make sure that the ring.LifecyclerConfig and ring.Config + // defaults are preserved + lc := ring.LifecyclerConfig{} + rc := ring.Config{} + + flagext.DefaultValues(&lc) + flagext.DefaultValues(&rc) + + // Configure ring + rc.KVStore = cfg.KVStore + rc.HeartbeatTimeout = cfg.HeartbeatTimeout + rc.ReplicationFactor = 1 + + // Configure lifecycler + lc.RingConfig = rc + lc.ListenPort = &cfg.ListenPort + lc.Addr = cfg.InstanceAddr + lc.Port = cfg.InstancePort + lc.ID = cfg.InstanceID + lc.InfNames = cfg.InstanceInterfaceNames + lc.SkipUnregister = cfg.SkipUnregister + lc.HeartbeatPeriod = cfg.HeartbeatPeriod + lc.NumTokens = cfg.NumTokens + lc.ObservePeriod = 0 + lc.JoinAfter = 0 + lc.MinReadyDuration = 0 + lc.FinalSleep = 0 + + return lc +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go new file mode 100644 index 000000000000..fed3c549feea --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/compat.go @@ -0,0 +1,64 @@ +package rules + +import ( + time "time" + + "github.com/cortexproject/cortex/pkg/ingester/client" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + + legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" +) + +// ToProto transforms a formatted prometheus rulegroup to a rule group protobuf +func ToProto(user string, namespace string, rl legacy_rulefmt.RuleGroup) *RuleGroupDesc { + rg := RuleGroupDesc{ + Name: rl.Name, + Namespace: namespace, + Interval: time.Duration(rl.Interval), + Rules: formattedRuleToProto(rl.Rules), + User: user, + } + return &rg +} + +func formattedRuleToProto(rls []legacy_rulefmt.Rule) []*RuleDesc { + rules := make([]*RuleDesc, len(rls)) + for i := range rls { + rules[i] = &RuleDesc{ + Expr: rls[i].Expr, + Record: rls[i].Record, + Alert: rls[i].Alert, + For: time.Duration(rls[i].For), + Labels: client.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Labels)), + Annotations: client.FromLabelsToLabelAdapters(labels.FromMap(rls[i].Annotations)), + } + } + + return rules +} + +// FromProto generates a rulefmt RuleGroup +func FromProto(rg *RuleGroupDesc) legacy_rulefmt.RuleGroup { + formattedRuleGroup := legacy_rulefmt.RuleGroup{ + Name: rg.GetName(), + Interval: model.Duration(rg.Interval), + Rules: make([]legacy_rulefmt.Rule, len(rg.GetRules())), + } + + for i, rl := range rg.GetRules() { + newRule := legacy_rulefmt.Rule{ + Record: rl.GetRecord(), + Alert: rl.GetAlert(), + Expr: rl.GetExpr(), + Labels: client.FromLabelAdaptersToLabels(rl.Labels).Map(), + Annotations: client.FromLabelAdaptersToLabels(rl.Annotations).Map(), + For: model.Duration(rl.GetFor()), + } + + formattedRuleGroup.Rules[i] = newRule + } + + return formattedRuleGroup +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go new file mode 100644 index 000000000000..f03a3d5dbf73 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go @@ -0,0 +1,164 @@ +package objectclient + +import ( + "bytes" + "context" + "io/ioutil" + strings "strings" + + "github.com/go-kit/kit/log/level" + proto "github.com/gogo/protobuf/proto" + + "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/util" +) + 
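The compat.go conversions above are the bridge between the YAML rule format (legacy_rulefmt) and the protobuf types the ruler stores and ships around. A minimal round-trip sketch, assuming the vendored packages resolve at the import paths shown in this diff; the group, user, and rule names ("example", "user-1", "HighErrorRate") are hypothetical:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"

	legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt"
	"github.com/cortexproject/cortex/pkg/ruler/rules"
)

func main() {
	// A formatted rule group, as it would be parsed from a rules YAML file.
	rg := legacy_rulefmt.RuleGroup{
		Name:     "example",
		Interval: model.Duration(time.Minute),
		Rules: []legacy_rulefmt.Rule{{
			Alert:  "HighErrorRate",
			Expr:   "rate(errors_total[5m]) > 0.5",
			For:    model.Duration(10 * time.Minute),
			Labels: map[string]string{"severity": "page"},
		}},
	}

	// ToProto attaches the owning user and namespace; FromProto drops them again.
	desc := rules.ToProto("user-1", "namespace-1", rg)
	back := rules.FromProto(desc)

	fmt.Println(desc.User, desc.Namespace, back.Name) // user-1 namespace-1 example
}

Note that FromProto returns only the group itself, so the user/namespace pair has to travel alongside it, which is exactly what the object key schema below encodes.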
+// Object Rule Storage Schema
+// =======================
+// Object Name: "rules/<user_id>/<namespace>/<group_name>"
+// Storage Format: Encoded RuleGroupDesc
+
+const (
+	rulePrefix = "rules/"
+)
+
+// RuleStore allows cortex rules to be stored using an object store backend.
+type RuleStore struct {
+	client chunk.ObjectClient
+}
+
+// NewRuleStore returns a new RuleStore
+func NewRuleStore(client chunk.ObjectClient) *RuleStore {
+	return &RuleStore{
+		client: client,
+	}
+}
+
+func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string) (*rules.RuleGroupDesc, error) {
+	reader, err := o.client.GetObject(ctx, objectKey)
+	if err == chunk.ErrStorageObjectNotFound {
+		level.Debug(util.Logger).Log("msg", "rule group does not exist", "name", objectKey)
+		return nil, rules.ErrGroupNotFound
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	rg := &rules.RuleGroupDesc{}
+
+	err = proto.Unmarshal(buf, rg)
+	if err != nil {
+		return nil, err
+	}
+
+	return rg, nil
+}
+
+// ListAllRuleGroups returns all the active rule groups
+func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rules.RuleGroupList, error) {
+	ruleGroupObjects, err := o.client.List(ctx, generateRuleObjectKey("", "", ""))
+	if err != nil {
+		return nil, err
+	}
+
+	userGroupMap := map[string]rules.RuleGroupList{}
+	for _, obj := range ruleGroupObjects {
+
+		user := decomposeRuleObjectKey(obj.Key)
+		if user == "" {
+			continue
+		}
+
+		rg, err := o.getRuleGroup(ctx, obj.Key)
+		if err != nil {
+			return nil, err
+		}
+
+		if _, exists := userGroupMap[user]; !exists {
+			userGroupMap[user] = rules.RuleGroupList{}
+		}
+		userGroupMap[user] = append(userGroupMap[user], rg)
+	}
+
+	return userGroupMap, nil
+}
+
+// ListRuleGroups returns all the active rule groups for a user
+func (o *RuleStore) ListRuleGroups(ctx context.Context, userID, namespace string) (rules.RuleGroupList, error) {
+	ruleGroupObjects, err := o.client.List(ctx, generateRuleObjectKey(userID, namespace, ""))
+	if err != nil {
+		return nil, err
+	}
+
+	groups := []*rules.RuleGroupDesc{}
+	for _, obj := range ruleGroupObjects {
+		level.Debug(util.Logger).Log("msg", "listing rule group", "key", obj.Key)
+
+		rg, err := o.getRuleGroup(ctx, obj.Key)
+		if err != nil {
+			level.Error(util.Logger).Log("msg", "unable to retrieve rule group", "err", err, "key", obj.Key)
+			return nil, err
+		}
+		groups = append(groups, rg)
+	}
+	return groups, nil
+}
+
+// GetRuleGroup returns the requested rule group
+func (o *RuleStore) GetRuleGroup(ctx context.Context, userID string, namespace string, grp string) (*rules.RuleGroupDesc, error) {
+	handle := generateRuleObjectKey(userID, namespace, grp)
+	rg, err := o.getRuleGroup(ctx, handle)
+	if err != nil {
+		return nil, err
+	}
+
+	return rg, nil
+}
+
+// SetRuleGroup sets the provided rule group
+func (o *RuleStore) SetRuleGroup(ctx context.Context, userID string, namespace string, group *rules.RuleGroupDesc) error {
+	data, err := proto.Marshal(group)
+	if err != nil {
+		return err
+	}
+
+	objectKey := generateRuleObjectKey(userID, namespace, group.Name)
+	return o.client.PutObject(ctx, objectKey, bytes.NewReader(data))
+}
+
+// DeleteRuleGroup deletes the specified rule group
+func (o *RuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespace string, groupName string) error {
+	objectKey := generateRuleObjectKey(userID, namespace, groupName)
+	err := o.client.DeleteObject(ctx, objectKey)
+	if err == chunk.ErrStorageObjectNotFound {
+		
return rules.ErrGroupNotFound + } + return err +} + +func generateRuleObjectKey(id, namespace, name string) string { + if id == "" { + return rulePrefix + } + prefix := rulePrefix + id + "/" + if namespace == "" { + return prefix + } + return prefix + namespace + "/" + name +} + +func decomposeRuleObjectKey(handle string) string { + components := strings.Split(handle, "/") + if len(components) != 4 { + return "" + } + return components[1] +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/rules.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/rules.pb.go new file mode 100644 index 000000000000..dfdcb8ba12c8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/rules.pb.go @@ -0,0 +1,1193 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: rules.proto + +package rules + +import ( + fmt "fmt" + _ "github.com/cortexproject/cortex/pkg/ingester/client" + github_com_cortexproject_cortex_pkg_ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/duration" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// RuleGroupDesc is a proto representation of a cortex rule group +type RuleGroupDesc struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Interval time.Duration `protobuf:"bytes,3,opt,name=interval,proto3,stdduration" json:"interval"` + Rules []*RuleDesc `protobuf:"bytes,4,rep,name=rules,proto3" json:"rules,omitempty"` + User string `protobuf:"bytes,6,opt,name=user,proto3" json:"user,omitempty"` +} + +func (m *RuleGroupDesc) Reset() { *m = RuleGroupDesc{} } +func (*RuleGroupDesc) ProtoMessage() {} +func (*RuleGroupDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_8e722d3e922f0937, []int{0} +} +func (m *RuleGroupDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleGroupDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuleGroupDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuleGroupDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleGroupDesc.Merge(m, src) +} +func (m *RuleGroupDesc) XXX_Size() int { + return m.Size() +} +func (m *RuleGroupDesc) XXX_DiscardUnknown() { + xxx_messageInfo_RuleGroupDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleGroupDesc proto.InternalMessageInfo + +func (m *RuleGroupDesc) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RuleGroupDesc) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *RuleGroupDesc) GetInterval() time.Duration 
{ + if m != nil { + return m.Interval + } + return 0 +} + +func (m *RuleGroupDesc) GetRules() []*RuleDesc { + if m != nil { + return m.Rules + } + return nil +} + +func (m *RuleGroupDesc) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +// RuleDesc is a proto representation of a Prometheus Rule +type RuleDesc struct { + Expr string `protobuf:"bytes,1,opt,name=expr,proto3" json:"expr,omitempty"` + Record string `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` + Alert string `protobuf:"bytes,3,opt,name=alert,proto3" json:"alert,omitempty"` + For time.Duration `protobuf:"bytes,4,opt,name=for,proto3,stdduration" json:"for"` + Labels []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,5,rep,name=labels,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"labels"` + Annotations []github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter `protobuf:"bytes,6,rep,name=annotations,proto3,customtype=github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" json:"annotations"` +} + +func (m *RuleDesc) Reset() { *m = RuleDesc{} } +func (*RuleDesc) ProtoMessage() {} +func (*RuleDesc) Descriptor() ([]byte, []int) { + return fileDescriptor_8e722d3e922f0937, []int{1} +} +func (m *RuleDesc) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuleDesc.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuleDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleDesc.Merge(m, src) +} +func (m *RuleDesc) XXX_Size() int { + return m.Size() +} +func (m *RuleDesc) XXX_DiscardUnknown() { + xxx_messageInfo_RuleDesc.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleDesc proto.InternalMessageInfo + +func (m *RuleDesc) GetExpr() string { + if m != nil { + return m.Expr + } + return "" +} + +func (m *RuleDesc) GetRecord() string { + if m != nil { + return m.Record + } + return "" +} + +func (m *RuleDesc) GetAlert() string { + if m != nil { + return m.Alert + } + return "" +} + +func (m *RuleDesc) GetFor() time.Duration { + if m != nil { + return m.For + } + return 0 +} + +func init() { + proto.RegisterType((*RuleGroupDesc)(nil), "rules.RuleGroupDesc") + proto.RegisterType((*RuleDesc)(nil), "rules.RuleDesc") +} + +func init() { proto.RegisterFile("rules.proto", fileDescriptor_8e722d3e922f0937) } + +var fileDescriptor_8e722d3e922f0937 = []byte{ + // 456 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x51, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xce, 0x6c, 0xd3, 0x98, 0x4e, 0x59, 0xac, 0x83, 0xc8, 0xb8, 0xc8, 0xb4, 0x2c, 0x08, 0xbd, + 0x98, 0xc0, 0x8a, 0x27, 0x0f, 0x6a, 0x59, 0x50, 0x8a, 0x07, 0xc9, 0xd1, 0xdb, 0x34, 0x7d, 0x1b, + 0xa3, 0xb3, 0x99, 0x30, 0x33, 0x11, 0x2f, 0x82, 0x3f, 0xc1, 0xa3, 0x3f, 0xc1, 0x9f, 0xb2, 0x37, + 0x7b, 0x5c, 0x3c, 0xac, 0x36, 0xbd, 0x78, 0xdc, 0x3f, 0x20, 0xc8, 0xcc, 0xa4, 0x6e, 0x8f, 0x22, + 0xec, 0x29, 0xef, 0xcb, 0x9b, 0xef, 0xbd, 0xef, 0xfb, 0x1e, 0x1e, 0xaa, 0x46, 0x80, 0x4e, 0x6a, + 0x25, 0x8d, 0x24, 0x7d, 0x07, 0x0e, 0x1e, 0x14, 0xa5, 0x79, 0xd3, 0x2c, 0x92, 0x5c, 0x9e, 0xa6, + 0x85, 0x2c, 0x64, 0xea, 0xba, 0x8b, 0xe6, 0xc4, 0x21, 0x07, 0x5c, 0xe5, 0x59, 0x07, 0xac, 0x90, + 0xb2, 0x10, 0x70, 0xf5, 0x6a, 0xd9, 0x28, 0x6e, 0x4a, 0x59, 0x75, 0xfd, 0xa7, 0x3b, 
0xe3, 0x72, + 0xa9, 0x0c, 0x7c, 0xa8, 0x95, 0x7c, 0x0b, 0xb9, 0xe9, 0x50, 0x5a, 0xbf, 0x2b, 0xd2, 0xb2, 0x2a, + 0x40, 0x1b, 0x50, 0x69, 0x2e, 0x4a, 0xa8, 0xb6, 0x2d, 0x3f, 0xe1, 0xf0, 0x1b, 0xc2, 0xfb, 0x59, + 0x23, 0xe0, 0xb9, 0x92, 0x4d, 0x7d, 0x0c, 0x3a, 0x27, 0x04, 0x87, 0x15, 0x3f, 0x05, 0x8a, 0x26, + 0x68, 0x3a, 0xc8, 0x5c, 0x4d, 0xee, 0xe1, 0x81, 0xfd, 0xea, 0x9a, 0xe7, 0x40, 0xf7, 0x5c, 0xe3, + 0xea, 0x07, 0x79, 0x82, 0xe3, 0xb2, 0x32, 0xa0, 0xde, 0x73, 0x41, 0x7b, 0x13, 0x34, 0x1d, 0x1e, + 0xdd, 0x4d, 0xbc, 0xf0, 0x64, 0x2b, 0x3c, 0x39, 0xee, 0x84, 0xcf, 0xe2, 0xb3, 0x8b, 0x71, 0xf0, + 0xe5, 0xc7, 0x18, 0x65, 0x7f, 0x49, 0xe4, 0x3e, 0xf6, 0xf1, 0xd0, 0x70, 0xd2, 0x9b, 0x0e, 0x8f, + 0x6e, 0x26, 0x3e, 0x39, 0xab, 0xcb, 0x4a, 0xca, 0x7c, 0xd7, 0x2a, 0x6b, 0x34, 0x28, 0x1a, 0x79, + 0x65, 0xb6, 0x9e, 0x87, 0x71, 0x7f, 0x14, 0xcd, 0xc3, 0xf8, 0xc6, 0x28, 0x9e, 0x87, 0x71, 0x3c, + 0x1a, 0x1c, 0xfe, 0xde, 0xc3, 0xf1, 0x96, 0x69, 0x29, 0x36, 0x93, 0xad, 0x19, 0x5b, 0x93, 0x3b, + 0x38, 0x52, 0x90, 0x4b, 0xb5, 0xec, 0x9c, 0x74, 0x88, 0xdc, 0xc6, 0x7d, 0x2e, 0x40, 0x19, 0xe7, + 0x61, 0x90, 0x79, 0x40, 0x1e, 0xe1, 0xde, 0x89, 0x54, 0x34, 0xfc, 0x77, 0x5f, 0xf6, 0x3d, 0xd1, + 0x38, 0x12, 0x7c, 0x01, 0x42, 0xd3, 0xbe, 0xf3, 0x74, 0x2b, 0xe9, 0x62, 0x7f, 0x69, 0xff, 0xbe, + 0xe2, 0xa5, 0x9a, 0xbd, 0xb0, 0x8c, 0xef, 0x17, 0xe3, 0xff, 0x39, 0xa2, 0x1f, 0xf3, 0x6c, 0xc9, + 0x6b, 0x03, 0x2a, 0xeb, 0x56, 0x91, 0x8f, 0x78, 0xc8, 0xab, 0x4a, 0x1a, 0xa7, 0x48, 0xd3, 0xe8, + 0xfa, 0x37, 0xef, 0xee, 0x73, 0x57, 0xd8, 0x9f, 0x3d, 0x5e, 0xad, 0x59, 0x70, 0xbe, 0x66, 0xc1, + 0xe5, 0x9a, 0xa1, 0x4f, 0x2d, 0x43, 0x5f, 0x5b, 0x86, 0xce, 0x5a, 0x86, 0x56, 0x2d, 0x43, 0x3f, + 0x5b, 0x86, 0x7e, 0xb5, 0x2c, 0xb8, 0x6c, 0x19, 0xfa, 0xbc, 0x61, 0xc1, 0x6a, 0xc3, 0x82, 0xf3, + 0x0d, 0x0b, 0x5e, 0xfb, 0x13, 0x2f, 0x22, 0x17, 0xec, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x15, 0x3f, 0x81, 0xc2, 0x3c, 0x03, 0x00, 0x00, +} + +func (this *RuleGroupDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RuleGroupDesc) + if !ok { + that2, ok := that.(RuleGroupDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Namespace != that1.Namespace { + return false + } + if this.Interval != that1.Interval { + return false + } + if len(this.Rules) != len(that1.Rules) { + return false + } + for i := range this.Rules { + if !this.Rules[i].Equal(that1.Rules[i]) { + return false + } + } + if this.User != that1.User { + return false + } + return true +} +func (this *RuleDesc) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RuleDesc) + if !ok { + that2, ok := that.(RuleDesc) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Expr != that1.Expr { + return false + } + if this.Record != that1.Record { + return false + } + if this.Alert != that1.Alert { + return false + } + if this.For != that1.For { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Annotations) != len(that1.Annotations) { + return false + } + for i := range this.Annotations { + if !this.Annotations[i].Equal(that1.Annotations[i]) { + return false + } + } + return true +} 
+func (this *RuleGroupDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&rules.RuleGroupDesc{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Namespace: "+fmt.Sprintf("%#v", this.Namespace)+",\n") + s = append(s, "Interval: "+fmt.Sprintf("%#v", this.Interval)+",\n") + if this.Rules != nil { + s = append(s, "Rules: "+fmt.Sprintf("%#v", this.Rules)+",\n") + } + s = append(s, "User: "+fmt.Sprintf("%#v", this.User)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RuleDesc) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&rules.RuleDesc{") + s = append(s, "Expr: "+fmt.Sprintf("%#v", this.Expr)+",\n") + s = append(s, "Record: "+fmt.Sprintf("%#v", this.Record)+",\n") + s = append(s, "Alert: "+fmt.Sprintf("%#v", this.Alert)+",\n") + s = append(s, "For: "+fmt.Sprintf("%#v", this.For)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "Annotations: "+fmt.Sprintf("%#v", this.Annotations)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringRules(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *RuleGroupDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleGroupDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleGroupDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintRules(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x32 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + n1, err1 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Interval, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintRules(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x1a + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintRules(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintRules(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuleDesc) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuleDesc) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Annotations[iNdEx].Size() + i -= size + if _, err := 
m.Annotations[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRules(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + n2, err2 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.For, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.For):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintRules(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x22 + if len(m.Alert) > 0 { + i -= len(m.Alert) + copy(dAtA[i:], m.Alert) + i = encodeVarintRules(dAtA, i, uint64(len(m.Alert))) + i-- + dAtA[i] = 0x1a + } + if len(m.Record) > 0 { + i -= len(m.Record) + copy(dAtA[i:], m.Record) + i = encodeVarintRules(dAtA, i, uint64(len(m.Record))) + i-- + dAtA[i] = 0x12 + } + if len(m.Expr) > 0 { + i -= len(m.Expr) + copy(dAtA[i:], m.Expr) + i = encodeVarintRules(dAtA, i, uint64(len(m.Expr))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRules(dAtA []byte, offset int, v uint64) int { + offset -= sovRules(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RuleGroupDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Interval) + n += 1 + l + sovRules(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + return n +} + +func (m *RuleDesc) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Expr) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = len(m.Record) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = len(m.Alert) + if l > 0 { + n += 1 + l + sovRules(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.For) + n += 1 + l + sovRules(uint64(l)) + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for _, e := range m.Annotations { + l = e.Size() + n += 1 + l + sovRules(uint64(l)) + } + } + return n +} + +func sovRules(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRules(x uint64) (n int) { + return sovRules(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RuleGroupDesc) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]*RuleDesc{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(f.String(), "RuleDesc", "RuleDesc", 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&RuleGroupDesc{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Interval:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `}`, + }, "") + return s +} +func (this *RuleDesc) String() string { + if this == nil 
{ + return "nil" + } + s := strings.Join([]string{`&RuleDesc{`, + `Expr:` + fmt.Sprintf("%v", this.Expr) + `,`, + `Record:` + fmt.Sprintf("%v", this.Record) + `,`, + `Alert:` + fmt.Sprintf("%v", this.Alert) + `,`, + `For:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.For), "Duration", "duration.Duration", 1), `&`, ``, 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `}`, + }, "") + return s +} +func valueToStringRules(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RuleGroupDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleGroupDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleGroupDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Interval, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &RuleDesc{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRules(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleDesc) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleDesc: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleDesc: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Record = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alert", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alert = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field For", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.For, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRules + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRules + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRules + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Annotations = append(m.Annotations, github_com_cortexproject_cortex_pkg_ingester_client.LabelAdapter{}) + if err := m.Annotations[len(m.Annotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRules(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthRules + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRules(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRules + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthRules + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRules + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRules(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthRules + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRules = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRules = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/rules.proto b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/rules.proto new file mode 100644 index 000000000000..64196a10e84e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/rules.proto @@ -0,0 +1,43 @@ + +syntax = "proto3"; + +package rules; + +option go_package = "rules"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// RuleGroupDesc is a proto representation of a cortex rule group +message RuleGroupDesc { + reserved 5, 7, 8; + string name = 1; + string namespace = 2; + google.protobuf.Duration interval = 3 + [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; + repeated RuleDesc rules = 4; + string user = 6; +} + +// RuleDesc is a proto representation of a Prometheus Rule +message RuleDesc { + reserved 7 to 12; + string expr = 1; + string record = 2; + string alert = 3; + google.protobuf.Duration for = 4 [(gogoproto.nullable) = false,(gogoproto.stdduration) = true]; + repeated cortex.LabelPair labels = 5 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = + "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" + ]; + repeated cortex.LabelPair annotations = 6 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = + "github.com/cortexproject/cortex/pkg/ingester/client.LabelAdapter" + ]; +} \ No newline at end of file diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go new file mode 100644 index 000000000000..d48777c37719 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/store.go @@ -0,0 +1,132 @@ +package rules + +import ( + "context" + "errors" + + "github.com/cortexproject/cortex/pkg/configs/userconfig" + + "github.com/cortexproject/cortex/pkg/configs/client" + + legacy_rulefmt "github.com/cortexproject/cortex/pkg/ruler/legacy_rulefmt" +) + +var ( + // ErrGroupNotFound is returned if a rule group does not exist + ErrGroupNotFound = errors.New("group does not exist") + // ErrGroupNamespaceNotFound is returned if a namespace does not exist + ErrGroupNamespaceNotFound = errors.New("group namespace does not exist") + // ErrUserNotFound is returned if the user does not currently exist + ErrUserNotFound = errors.New("no rule groups found for user") +) + +// RuleStore is used to store and retrieve rules +type RuleStore interface { + ListAllRuleGroups(ctx context.Context) (map[string]RuleGroupList, error) + ListRuleGroups(ctx context.Context, userID string, namespace string) (RuleGroupList, error) + GetRuleGroup(ctx context.Context, userID, namespace, group string) (*RuleGroupDesc, error) + SetRuleGroup(ctx context.Context, userID, namespace string, group *RuleGroupDesc) error + DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error +} + +// RuleGroupList contains a set of rule groups +type RuleGroupList []*RuleGroupDesc + +// Formatted returns the rule group list as a set of formatted rule groups mapped +// by namespace +func (l RuleGroupList) Formatted() map[string][]legacy_rulefmt.RuleGroup { + ruleMap := map[string][]legacy_rulefmt.RuleGroup{} + for _, g := range l { + if _, exists := ruleMap[g.Namespace]; !exists { + ruleMap[g.Namespace] = []legacy_rulefmt.RuleGroup{FromProto(g)} + continue + } + ruleMap[g.Namespace] = append(ruleMap[g.Namespace], FromProto(g)) + + } + return ruleMap +} + +// ConfigRuleStore is a concrete implementation of RuleStore that sources rules from the config service +type ConfigRuleStore struct { + configClient client.Client + since userconfig.ID + ruleGroupList map[string]RuleGroupList +} + +// NewConfigRuleStore constructs a ConfigRuleStore +func NewConfigRuleStore(c client.Client) *ConfigRuleStore { + return &ConfigRuleStore{ + configClient: c, + since: 0, + ruleGroupList: make(map[string]RuleGroupList), + } +} + +// ListAllRuleGroups implements RuleStore +func (c *ConfigRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]RuleGroupList, error) { + + configs, err := c.configClient.GetRules(ctx, c.since) + + if err != nil { + return nil, err + } + + for user, cfg := range configs { + userRules := RuleGroupList{} + if cfg.IsDeleted() { + delete(c.ruleGroupList, user) + continue + } + rMap, err := cfg.Config.ParseFormatted() + if err != nil { + return nil, err + } + for file, rgs := range rMap { + for _, rg := range rgs.Groups { + userRules = append(userRules, ToProto(user, file, rg)) + } + } + c.ruleGroupList[user] = userRules + } + + if err != nil { + return nil, err + } + + c.since = getLatestConfigID(configs, c.since) + + return c.ruleGroupList, nil +} + +// getLatestConfigID gets the latest configs ID. 
+// max [latest, max (map getID cfgs)]
+func getLatestConfigID(cfgs map[string]userconfig.VersionedRulesConfig, latest userconfig.ID) userconfig.ID {
+	ret := latest
+	for _, config := range cfgs {
+		if config.ID > ret {
+			ret = config.ID
+		}
+	}
+	return ret
+}
+
+// ListRuleGroups is not implemented
+func (c *ConfigRuleStore) ListRuleGroups(ctx context.Context, userID string, namespace string) (RuleGroupList, error) {
+	return nil, errors.New("not implemented by the config service rule store")
+}
+
+// GetRuleGroup is not implemented
+func (c *ConfigRuleStore) GetRuleGroup(ctx context.Context, userID, namespace, group string) (*RuleGroupDesc, error) {
+	return nil, errors.New("not implemented by the config service rule store")
+}
+
+// SetRuleGroup is not implemented
+func (c *ConfigRuleStore) SetRuleGroup(ctx context.Context, userID, namespace string, group *RuleGroupDesc) error {
+	return errors.New("not implemented by the config service rule store")
+}
+
+// DeleteRuleGroup is not implemented
+func (c *ConfigRuleStore) DeleteRuleGroup(ctx context.Context, userID, namespace string, group string) error {
+	return errors.New("not implemented by the config service rule store")
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
new file mode 100644
index 000000000000..6bface18117b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/storage.go
@@ -0,0 +1,70 @@
+package ruler
+
+import (
+	"context"
+	"flag"
+	"fmt"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+	"github.com/cortexproject/cortex/pkg/chunk/aws"
+	"github.com/cortexproject/cortex/pkg/chunk/azure"
+	"github.com/cortexproject/cortex/pkg/chunk/gcp"
+	"github.com/cortexproject/cortex/pkg/configs/client"
+	"github.com/cortexproject/cortex/pkg/ruler/rules"
+	"github.com/cortexproject/cortex/pkg/ruler/rules/objectclient"
+)
+
+// RuleStoreConfig configures a rule store
+type RuleStoreConfig struct {
+	Type     string        `yaml:"type"`
+	ConfigDB client.Config `yaml:"configdb"`
+
+	// Object Storage Configs
+	Azure azure.BlobStorageConfig `yaml:"azure"`
+	GCS   gcp.GCSConfig           `yaml:"gcs"`
+	S3    aws.S3Config            `yaml:"s3"`
+
+	mock rules.RuleStore `yaml:"-"`
+}
+
+// RegisterFlags registers flags.
+func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) {
+	cfg.ConfigDB.RegisterFlagsWithPrefix("ruler.", f)
+	cfg.Azure.RegisterFlagsWithPrefix("ruler.storage.", f)
+	cfg.GCS.RegisterFlagsWithPrefix("ruler.storage.", f)
+	cfg.S3.RegisterFlagsWithPrefix("ruler.storage.", f)
+	f.StringVar(&cfg.Type, "ruler.storage.type", "configdb", "Method to use for backend rule storage (configdb, azure, gcs, s3)")
+}
+
+// NewRuleStorage returns a new rule storage backend poller and store
+func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) {
+	if cfg.mock != nil {
+		return cfg.mock, nil
+	}
+
+	switch cfg.Type {
+	case "configdb":
+		c, err := client.New(cfg.ConfigDB)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return rules.NewConfigRuleStore(c), nil
+	case "azure":
+		return newObjRuleStore(azure.NewBlobStorage(&cfg.Azure, ""))
+	case "gcs":
+		return newObjRuleStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCS, ""))
+	case "s3":
+		return newObjRuleStore(aws.NewS3ObjectClient(cfg.S3, ""))
+	default:
+		return nil, fmt.Errorf("unrecognized rule storage mode %v, choose one of: configdb, azure, gcs, s3", cfg.Type)
+	}
+}
+
+func newObjRuleStore(client chunk.ObjectClient, err error) (rules.RuleStore, error) {
+	if err != nil {
+		return nil, err
+	}
+	return objectclient.NewRuleStore(client), nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/bucket_client.go
new file mode 100644
index 000000000000..69cec69c530a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/bucket_client.go
@@ -0,0 +1,27 @@
+package azure
+
+import (
+	"github.com/go-kit/kit/log"
+	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/objstore/azure"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// NewBucketClient creates a new Azure bucket client
+func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
+	bucketConfig := azure.Config{
+		StorageAccountName: cfg.StorageAccountName,
+		StorageAccountKey:  cfg.StorageAccountKey.Value,
+		ContainerName:      cfg.ContainerName,
+		Endpoint:           cfg.Endpoint,
+		MaxRetries:         cfg.MaxRetries,
+	}
+
+	// Thanos currently doesn't support passing the config as is, but expects a YAML,
+	// so we're going to serialize it.
+ serialized, err := yaml.Marshal(bucketConfig) + if err != nil { + return nil, err + } + + return azure.NewBucket(logger, serialized, name) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go new file mode 100644 index 000000000000..d8748fdc765b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go @@ -0,0 +1,25 @@ +package azure + +import ( + "flag" + + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +// Config holds the config options for an Azure backend +type Config struct { + StorageAccountName string `yaml:"account_name"` + StorageAccountKey flagext.Secret `yaml:"account_key"` + ContainerName string `yaml:"container_name"` + Endpoint string `yaml:"endpoint_suffix"` + MaxRetries int `yaml:"max_retries"` +} + +// RegisterFlags registers the flags for TSDB Azure storage +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.StorageAccountName, "experimental.tsdb.azure.account-name", "", "Azure storage account name") + f.Var(&cfg.StorageAccountKey, "experimental.tsdb.azure.account-key", "Azure storage account key") + f.StringVar(&cfg.ContainerName, "experimental.tsdb.azure.container-name", "", "Azure storage container name") + f.StringVar(&cfg.Endpoint, "experimental.tsdb.azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN") + f.IntVar(&cfg.MaxRetries, "experimental.tsdb.azure.max-retries", 20, "Number of retries for recoverable errors") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/bucket_client.go new file mode 100644 index 000000000000..feed92c0732b --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/bucket_client.go @@ -0,0 +1,11 @@ +package filesystem + +import ( + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/objstore/filesystem" +) + +// NewBucketClient creates a new filesystem bucket client +func NewBucketClient(cfg Config) (objstore.Bucket, error) { + return filesystem.NewBucket(cfg.Directory) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go new file mode 100644 index 000000000000..03a4f85f5b77 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go @@ -0,0 +1,13 @@ +package filesystem + +import "flag" + +// Config stores the configuration for storing and accessing objects in the local filesystem. 
+type Config struct { + Directory string `yaml:"dir"` +} + +// RegisterFlags registers the flags for TSDB filesystem storage +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.Directory, "experimental.tsdb.filesystem.dir", "", "Local filesystem storage directory.") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/bucket_client.go new file mode 100644 index 000000000000..5aad98df55f6 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/bucket_client.go @@ -0,0 +1,27 @@ +package gcs + +import ( + "context" + + "github.com/go-kit/kit/log" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/objstore/gcs" + yaml "gopkg.in/yaml.v2" +) + +// NewBucketClient creates a new GCS bucket client +func NewBucketClient(ctx context.Context, cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { + bucketConfig := gcs.Config{ + Bucket: cfg.BucketName, + ServiceAccount: cfg.ServiceAccount.Value, + } + + // Thanos currently doesn't support passing the config as is, but expects a YAML, + // so we're going to serialize it. + serialized, err := yaml.Marshal(bucketConfig) + if err != nil { + return nil, err + } + + return gcs.NewBucket(ctx, logger, serialized, name) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go new file mode 100644 index 000000000000..0363b9579b68 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go @@ -0,0 +1,19 @@ +package gcs + +import ( + "flag" + + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +// Config holds the config options for GCS backend +type Config struct { + BucketName string `yaml:"bucket_name"` + ServiceAccount flagext.Secret `yaml:"service_account"` +} + +// RegisterFlags registers the flags for TSDB GCS storage +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.BucketName, "experimental.tsdb.gcs.bucket-name", "", "GCS bucket name") + f.Var(&cfg.ServiceAccount, "experimental.tsdb.gcs.service-account", "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. 
If empty, fallback to Google default logic.") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/bucket_client.go new file mode 100644 index 000000000000..c9f54a099e67 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/bucket_client.go @@ -0,0 +1,27 @@ +package s3 + +import ( + "github.com/go-kit/kit/log" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/objstore/s3" +) + +// NewBucketClient creates a new S3 bucket client +func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { + return s3.NewBucketWithConfig(logger, newS3Config(cfg), name) +} + +// NewBucketReaderClient creates a new S3 bucket client +func NewBucketReaderClient(cfg Config, name string, logger log.Logger) (objstore.BucketReader, error) { + return s3.NewBucketWithConfig(logger, newS3Config(cfg), name) +} + +func newS3Config(cfg Config) s3.Config { + return s3.Config{ + Bucket: cfg.BucketName, + Endpoint: cfg.Endpoint, + AccessKey: cfg.AccessKeyID, + SecretKey: cfg.SecretAccessKey.Value, + Insecure: cfg.Insecure, + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go new file mode 100644 index 000000000000..60e52d9f9b72 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go @@ -0,0 +1,25 @@ +package s3 + +import ( + "flag" + + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +// Config holds the config options for an S3 backend +type Config struct { + Endpoint string `yaml:"endpoint"` + BucketName string `yaml:"bucket_name"` + SecretAccessKey flagext.Secret `yaml:"secret_access_key"` + AccessKeyID string `yaml:"access_key_id"` + Insecure bool `yaml:"insecure"` +} + +// RegisterFlags registers the flags for TSDB s3 storage +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.StringVar(&cfg.AccessKeyID, "experimental.tsdb.s3.access-key-id", "", "S3 access key ID") + f.Var(&cfg.SecretAccessKey, "experimental.tsdb.s3.secret-access-key", "S3 secret access key") + f.StringVar(&cfg.BucketName, "experimental.tsdb.s3.bucket-name", "", "S3 bucket name") + f.StringVar(&cfg.Endpoint, "experimental.tsdb.s3.endpoint", "", "S3 endpoint without schema") + f.BoolVar(&cfg.Insecure, "experimental.tsdb.s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. 
This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go new file mode 100644 index 000000000000..103a0151ebfb --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go @@ -0,0 +1,29 @@ +package tsdb + +import ( + "context" + + "github.com/go-kit/kit/log" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/backend/azure" + "github.com/cortexproject/cortex/pkg/storage/backend/filesystem" + "github.com/cortexproject/cortex/pkg/storage/backend/gcs" + "github.com/cortexproject/cortex/pkg/storage/backend/s3" +) + +// NewBucketClient creates a new bucket client based on the configured backend +func NewBucketClient(ctx context.Context, cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { + switch cfg.Backend { + case BackendS3: + return s3.NewBucketClient(cfg.S3, name, logger) + case BackendGCS: + return gcs.NewBucketClient(ctx, cfg.GCS, name, logger) + case BackendAzure: + return azure.NewBucketClient(cfg.Azure, name, logger) + case BackendFilesystem: + return filesystem.NewBucketClient(cfg.Filesystem) + default: + return nil, errUnsupportedStorageBackend + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client_mock.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client_mock.go new file mode 100644 index 000000000000..6f695d2862f0 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client_mock.go @@ -0,0 +1,117 @@ +package tsdb + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + + "github.com/stretchr/testify/mock" +) + +var errObjectDoesNotExist = errors.New("object does not exist") + +// BucketClientMock mocks objstore.Bucket +type BucketClientMock struct { + mock.Mock +} + +// Upload mocks objstore.Bucket.Upload() +func (m *BucketClientMock) Upload(ctx context.Context, name string, r io.Reader) error { + args := m.Called(ctx, name, r) + return args.Error(0) +} + +// Delete mocks objstore.Bucket.Delete() +func (m *BucketClientMock) Delete(ctx context.Context, name string) error { + args := m.Called(ctx, name) + return args.Error(0) +} + +// Name mocks objstore.Bucket.Name() +func (m *BucketClientMock) Name() string { + return "mock" +} + +// Iter mocks objstore.Bucket.Iter() +func (m *BucketClientMock) Iter(ctx context.Context, dir string, f func(string) error) error { + args := m.Called(ctx, dir, f) + return args.Error(0) +} + +// MockIter is a convenient method to mock Iter() +func (m *BucketClientMock) MockIter(prefix string, objects []string, err error) { + m.MockIterWithCallback(prefix, objects, err, nil) +} + +// MockIterWithCallback is a convenient method to mock Iter() and get a callback called when the Iter +// API is called. 
+func (m *BucketClientMock) MockIterWithCallback(prefix string, objects []string, err error, cb func()) { + m.On("Iter", mock.Anything, prefix, mock.Anything).Return(err).Run(func(args mock.Arguments) { + if cb != nil { + cb() + } + + f := args.Get(2).(func(string) error) + + for _, o := range objects { + if f(o) != nil { + break + } + } + }) +} + +// Get mocks objstore.Bucket.Get() +func (m *BucketClientMock) Get(ctx context.Context, name string) (io.ReadCloser, error) { + args := m.Called(ctx, name) + val, err := args.Get(0), args.Error(1) + if val == nil { + return nil, err + } + return val.(io.ReadCloser), err +} + +// MockGet is a convenient method to mock Get() and Exists() +func (m *BucketClientMock) MockGet(name, content string, err error) { + if content != "" { + m.On("Exists", mock.Anything, name).Return(true, err) + m.On("Get", mock.Anything, name).Return(ioutil.NopCloser(bytes.NewReader([]byte(content))), err) + } else { + m.On("Exists", mock.Anything, name).Return(false, err) + m.On("Get", mock.Anything, name).Return(nil, errObjectDoesNotExist) + } +} + +// MockDelete is a convenient method to mock Delete() +func (m *BucketClientMock) MockDelete(name string, err error) { + m.On("Delete", mock.Anything, name).Return(err) +} + +// GetRange mocks objstore.Bucket.GetRange() +func (m *BucketClientMock) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + args := m.Called(ctx, name, off, length) + return args.Get(0).(io.ReadCloser), args.Error(1) +} + +// Exists mocks objstore.Bucket.Exists() +func (m *BucketClientMock) Exists(ctx context.Context, name string) (bool, error) { + args := m.Called(ctx, name) + return args.Bool(0), args.Error(1) +} + +// IsObjNotFoundErr mocks objstore.Bucket.IsObjNotFoundErr() +func (m *BucketClientMock) IsObjNotFoundErr(err error) bool { + return err == errObjectDoesNotExist +} + +// ObjectSize mocks objstore.Bucket.ObjectSize() +func (m *BucketClientMock) ObjectSize(ctx context.Context, name string) (uint64, error) { + args := m.Called(ctx, name) + return args.Get(0).(uint64), args.Error(1) +} + +// Close mocks objstore.Bucket.Close() +func (m *BucketClientMock) Close() error { + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go new file mode 100644 index 000000000000..3bab9993b210 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -0,0 +1,210 @@ +package tsdb + +import ( + "errors" + "flag" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/alecthomas/units" + "github.com/thanos-io/thanos/pkg/store" + + "github.com/cortexproject/cortex/pkg/storage/backend/azure" + "github.com/cortexproject/cortex/pkg/storage/backend/filesystem" + "github.com/cortexproject/cortex/pkg/storage/backend/gcs" + "github.com/cortexproject/cortex/pkg/storage/backend/s3" + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // BackendS3 is the value for the S3 storage backend + BackendS3 = "s3" + + // BackendGCS is the value for the GCS storage backend + BackendGCS = "gcs" + + // BackendAzure is the value for the Azure storage backend + BackendAzure = "azure" + + // BackendFilesystem is the value for the filesystem storage backend + BackendFilesystem = "filesystem" + + // TenantIDExternalLabel is the external label set when shipping blocks to the storage + TenantIDExternalLabel = "__org_id__" +) + +// Validation errors +var ( + supportedBackends = []string{BackendS3, BackendGCS, BackendAzure, BackendFilesystem} + + 
errUnsupportedStorageBackend = errors.New("unsupported TSDB storage backend") + errInvalidShipConcurrency = errors.New("invalid TSDB ship concurrency") + errInvalidCompactionInterval = errors.New("invalid TSDB compaction interval") + errInvalidCompactionConcurrency = errors.New("invalid TSDB compaction concurrency") + errInvalidStripeSize = errors.New("invalid TSDB stripe size") +) + +// Config holds the config information for TSDB storage +type Config struct { + Dir string `yaml:"dir"` + BlockRanges DurationList `yaml:"block_ranges_period"` + Retention time.Duration `yaml:"retention_period"` + ShipInterval time.Duration `yaml:"ship_interval"` + ShipConcurrency int `yaml:"ship_concurrency"` + Backend string `yaml:"backend"` + BucketStore BucketStoreConfig `yaml:"bucket_store"` + HeadCompactionInterval time.Duration `yaml:"head_compaction_interval"` + HeadCompactionConcurrency int `yaml:"head_compaction_concurrency"` + StripeSize int `yaml:"stripe_size"` + + // MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDBs during startup + MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"` + + // Backends + S3 s3.Config `yaml:"s3"` + GCS gcs.Config `yaml:"gcs"` + Azure azure.Config `yaml:"azure"` + Filesystem filesystem.Config `yaml:"filesystem"` +} + +// DurationList is the list of block ranges for a TSDB +type DurationList []time.Duration + +// String implements the flag.Value interface +func (d *DurationList) String() string { + values := make([]string, 0, len(*d)) + for _, v := range *d { + values = append(values, v.String()) + } + + return strings.Join(values, ",") +} + +// Set implements the flag.Value interface +func (d *DurationList) Set(s string) error { + values := strings.Split(s, ",") + *d = make([]time.Duration, 0, len(values)) // flag.Parse may be called twice, so overwrite instead of append + for _, v := range values { + t, err := time.ParseDuration(v) + if err != nil { + return err + } + *d = append(*d, t) + } + return nil +} + +// ToMilliseconds returns the duration list in milliseconds +func (d *DurationList) ToMilliseconds() []int64 { + values := make([]int64, 0, len(*d)) + for _, t := range *d { + values = append(values, t.Milliseconds()) + } + + return values +} + +// RegisterFlags registers the TSDB flags +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.S3.RegisterFlags(f) + cfg.GCS.RegisterFlags(f) + cfg.Azure.RegisterFlags(f) + cfg.BucketStore.RegisterFlags(f) + cfg.Filesystem.RegisterFlags(f) + + if len(cfg.BlockRanges) == 0 { + cfg.BlockRanges = []time.Duration{2 * time.Hour} // Default 2h block + } + + f.StringVar(&cfg.Dir, "experimental.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.") + f.Var(&cfg.BlockRanges, "experimental.tsdb.block-ranges-period", "TSDB blocks range period.") + f.DurationVar(&cfg.Retention, "experimental.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give queriers enough time to discover newly uploaded blocks.") + f.DurationVar(&cfg.ShipInterval, "experimental.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 
0 means shipping is disabled.") + f.IntVar(&cfg.ShipConcurrency, "experimental.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.") + f.StringVar(&cfg.Backend, "experimental.tsdb.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) + f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "experimental.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "Limit the number of concurrently opening TSDBs on startup.") + f.DurationVar(&cfg.HeadCompactionInterval, "experimental.tsdb.head-compaction-interval", 1*time.Minute, "How frequently Cortex tries to compact the TSDB head. A block is only created if the data covers the smallest block range. Must be greater than 0 and at most 5 minutes.") + f.IntVar(&cfg.HeadCompactionConcurrency, "experimental.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block") + f.IntVar(&cfg.StripeSize, "experimental.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") +} + +// Validate the config. +func (cfg *Config) Validate() error { + if !util.StringsContain(supportedBackends, cfg.Backend) { + return errUnsupportedStorageBackend + } + + if cfg.ShipInterval > 0 && cfg.ShipConcurrency <= 0 { + return errInvalidShipConcurrency + } + + if cfg.HeadCompactionInterval <= 0 || cfg.HeadCompactionInterval > 5*time.Minute { + return errInvalidCompactionInterval + } + + if cfg.HeadCompactionConcurrency <= 0 { + return errInvalidCompactionConcurrency + } + + if cfg.StripeSize <= 1 || (cfg.StripeSize&(cfg.StripeSize-1)) != 0 { // ensure stripe size is a power of 2 greater than 1 + return errInvalidStripeSize + } + + return cfg.BucketStore.Validate() +} + +// BucketStoreConfig holds the config information for Bucket Stores used by the querier +type BucketStoreConfig struct { + SyncDir string `yaml:"sync_dir"` + SyncInterval time.Duration `yaml:"sync_interval"` + MaxChunkPoolBytes uint64 `yaml:"max_chunk_pool_bytes"` + MaxSampleCount uint64 `yaml:"max_sample_count"` + MaxConcurrent int `yaml:"max_concurrent"` + TenantSyncConcurrency int `yaml:"tenant_sync_concurrency"` + BlockSyncConcurrency int `yaml:"block_sync_concurrency"` + MetaSyncConcurrency int `yaml:"meta_sync_concurrency"` + BinaryIndexHeader bool `yaml:"binary_index_header_enabled"` + ConsistencyDelay time.Duration `yaml:"consistency_delay"` + IndexCache IndexCacheConfig `yaml:"index_cache"` + IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` + + // Controls the ratio of postings offsets that the store will hold in memory. + // A larger value keeps fewer offsets, increasing the CPU cycles needed for queries touching those postings. + // It's meant for setups that want low baseline memory pressure and where less traffic is expected. + // Conversely, a smaller value increases baseline memory usage but slightly improves latency. + // 1 will keep all in memory. Default value is the same as in Prometheus which gives a good balance. 
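+ // For example (illustrative): with the Thanos default sampling of 32, roughly one + // of every 32 postings offsets is kept in memory, and the remaining ones are found + // by scanning forward from the nearest sampled offset. 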
+ PostingOffsetsInMemSampling int `yaml:"postings_offsets_in_mem_sampling" doc:"hidden"` +} + +// RegisterFlags registers the BucketStore flags +func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { + cfg.IndexCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.index-cache.") + + f.StringVar(&cfg.SyncDir, "experimental.tsdb.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") + f.DurationVar(&cfg.SyncInterval, "experimental.tsdb.bucket-store.sync-interval", 5*time.Minute, "How frequently to scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") + f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.tsdb.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") + f.Uint64Var(&cfg.MaxSampleCount, "experimental.tsdb.bucket-store.max-sample-count", 0, "Max number of samples per query when loading series from the long-term storage. 0 disables the limit.") + f.IntVar(&cfg.MaxConcurrent, "experimental.tsdb.bucket-store.max-concurrent", 20, "Max number of concurrent queries to execute against the long-term storage on a per-tenant basis.") + f.IntVar(&cfg.TenantSyncConcurrency, "experimental.tsdb.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants syncing blocks.") + f.IntVar(&cfg.BlockSyncConcurrency, "experimental.tsdb.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks syncing per tenant.") + f.IntVar(&cfg.MetaSyncConcurrency, "experimental.tsdb.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.") + f.BoolVar(&cfg.BinaryIndexHeader, "experimental.tsdb.bucket-store.binary-index-header-enabled", true, "Whether the bucket store should use the binary index header. If false, it uses the JSON index header.") + f.DurationVar(&cfg.ConsistencyDelay, "experimental.tsdb.bucket-store.consistency-delay", 0, "Minimum age of a block before it's read. Set it to a safe value (e.g. 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.") + f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.tsdb.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ + "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures the store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ + "Default is 6h, half of the default value for -compactor.deletion-delay.") + f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.tsdb.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls the ratio of postings offsets that the store will hold in memory.") +} + +// Validate the config. 
+func (cfg *BucketStoreConfig) Validate() error { + return cfg.IndexCache.Validate() +} + +// BlocksDir returns the directory path where TSDB blocks and WAL should be +// stored by the ingester +func (cfg *Config) BlocksDir(userID string) string { + return filepath.Join(cfg.Dir, userID) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go new file mode 100644 index 000000000000..20fbd6d13faa --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go @@ -0,0 +1,167 @@ +package tsdb + +import ( + "flag" + "fmt" + "strings" + "time" + + "github.com/alecthomas/units" + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/cacheutil" + "github.com/thanos-io/thanos/pkg/model" + storecache "github.com/thanos-io/thanos/pkg/store/cache" + + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // IndexCacheBackendInMemory is the value for the in-memory index cache backend. + IndexCacheBackendInMemory = "inmemory" + + // IndexCacheBackendMemcached is the value for the memcached index cache backend. + IndexCacheBackendMemcached = "memcached" + + // IndexCacheBackendDefault is the value for the default index cache backend. + IndexCacheBackendDefault = IndexCacheBackendInMemory + + defaultMaxItemSize = model.Bytes(128 * units.MiB) +) + +var ( + supportedIndexCacheBackends = []string{IndexCacheBackendInMemory, IndexCacheBackendMemcached} + + errUnsupportedIndexCacheBackend = errors.New("unsupported index cache backend") + errNoIndexCacheAddresses = errors.New("no index cache backend addresses") +) + +// IndexCacheConfig holds the index cache config. +type IndexCacheConfig struct { + Backend string `yaml:"backend"` + InMemory InMemoryIndexCacheConfig `yaml:"inmemory"` + Memcached MemcachedIndexCacheConfig `yaml:"memcached"` + PostingsCompression bool `yaml:"postings_compression_enabled"` +} + +func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.index-cache.") +} + +func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Backend, prefix+"backend", IndexCacheBackendDefault, fmt.Sprintf("The index cache backend type. Supported values: %s.", strings.Join(supportedIndexCacheBackends, ", "))) + f.BoolVar(&cfg.PostingsCompression, prefix+"postings-compression-enabled", false, "Compress postings before storing them into the postings cache.") + + cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.") + cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.") +} + +// Validate the config. 
+func (cfg *IndexCacheConfig) Validate() error { + if !util.StringsContain(supportedIndexCacheBackends, cfg.Backend) { + return errUnsupportedIndexCacheBackend + } + + if cfg.Backend == IndexCacheBackendMemcached { + if err := cfg.Memcached.Validate(); err != nil { + return err + } + } + + return nil +} + +// InMemoryIndexCacheConfig holds the in-memory index cache config. +type InMemoryIndexCacheConfig struct { + MaxSizeBytes uint64 `yaml:"max_size_bytes"` +} + +func (cfg *InMemoryIndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.Uint64Var(&cfg.MaxSizeBytes, prefix+"max-size-bytes", uint64(1*units.Gibibyte), "Maximum size in bytes of in-memory index cache used to speed up blocks index lookups (shared between all tenants).") +} + +// MemcachedIndexCacheConfig holds the memcached index cache config. +type MemcachedIndexCacheConfig struct { + Addresses string `yaml:"addresses"` + Timeout time.Duration `yaml:"timeout"` + MaxIdleConnections int `yaml:"max_idle_connections"` + MaxAsyncConcurrency int `yaml:"max_async_concurrency"` + MaxAsyncBufferSize int `yaml:"max_async_buffer_size"` + MaxGetMultiConcurrency int `yaml:"max_get_multi_concurrency"` + MaxGetMultiBatchSize int `yaml:"max_get_multi_batch_size"` + MaxItemSize int `yaml:"max_item_size"` +} + +func (cfg *MemcachedIndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.StringVar(&cfg.Addresses, prefix+"addresses", "", "Comma separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query), dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).") + f.DurationVar(&cfg.Timeout, prefix+"timeout", 100*time.Millisecond, "The socket read/write timeout.") + f.IntVar(&cfg.MaxIdleConnections, prefix+"max-idle-connections", 16, "The maximum number of idle connections that will be maintained per address.") + f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 50, "The maximum number of concurrent asynchronous operations that can occur.") + f.IntVar(&cfg.MaxAsyncBufferSize, prefix+"max-async-buffer-size", 10000, "The maximum number of enqueued asynchronous operations allowed.") + f.IntVar(&cfg.MaxGetMultiConcurrency, prefix+"max-get-multi-concurrency", 100, "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.") + f.IntVar(&cfg.MaxGetMultiBatchSize, prefix+"max-get-multi-batch-size", 0, "The maximum number of keys a single underlying get operation should fetch. If more keys are specified, keys are internally split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.") + f.IntVar(&cfg.MaxItemSize, prefix+"max-item-size", 1024*1024, "The maximum size of an item stored in memcached. Bigger items are not stored. If set to 0, no maximum size is enforced.") +} + +func (cfg *MemcachedIndexCacheConfig) GetAddresses() []string { + if cfg.Addresses == "" { + return []string{} + } + + return strings.Split(cfg.Addresses, ",") +} + +// Validate the config. +func (cfg *MemcachedIndexCacheConfig) Validate() error { + if len(cfg.GetAddresses()) == 0 { + return errNoIndexCacheAddresses + } + + return nil +} + +// NewIndexCache creates a new index cache based on the input configuration. 
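+// +// An illustrative wiring sketch (identifiers assumed, not part of this file): +// +// registry := prometheus.NewRegistry() +// cache, err := NewIndexCache(IndexCacheConfig{Backend: IndexCacheBackendInMemory}, logger, registry) +// if err != nil { +// return err +// } +// // cache can then be shared across all per-tenant bucket stores. 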
+func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheus.Registerer) (storecache.IndexCache, error) { + switch cfg.Backend { + case IndexCacheBackendInMemory: + return newInMemoryIndexCache(cfg.InMemory, logger, registerer) + case IndexCacheBackendMemcached: + return newMemcachedIndexCache(cfg.Memcached, logger, registerer) + default: + return nil, errUnsupportedIndexCacheBackend + } +} + +func newInMemoryIndexCache(cfg InMemoryIndexCacheConfig, logger log.Logger, registerer prometheus.Registerer) (storecache.IndexCache, error) { + maxCacheSize := model.Bytes(cfg.MaxSizeBytes) + + // Calculate the max item size. + maxItemSize := defaultMaxItemSize + if maxItemSize > maxCacheSize { + maxItemSize = maxCacheSize + } + + return storecache.NewInMemoryIndexCacheWithConfig(logger, registerer, storecache.InMemoryIndexCacheConfig{ + MaxSize: maxCacheSize, + MaxItemSize: maxItemSize, + }) +} + +func newMemcachedIndexCache(cfg MemcachedIndexCacheConfig, logger log.Logger, registerer prometheus.Registerer) (storecache.IndexCache, error) { + config := cacheutil.MemcachedClientConfig{ + Addresses: cfg.GetAddresses(), + Timeout: cfg.Timeout, + MaxIdleConnections: cfg.MaxIdleConnections, + MaxAsyncConcurrency: cfg.MaxAsyncConcurrency, + MaxAsyncBufferSize: cfg.MaxAsyncBufferSize, + MaxGetMultiConcurrency: cfg.MaxGetMultiConcurrency, + MaxGetMultiBatchSize: cfg.MaxGetMultiBatchSize, + MaxItemSize: model.Bytes(cfg.MaxItemSize), + DNSProviderUpdateInterval: 30 * time.Second, + } + + client, err := cacheutil.NewMemcachedClientWithConfig(logger, "index-cache", config, registerer) + if err != nil { + return nil, errors.Wrapf(err, "create index cache memcached client") + } + + return storecache.NewMemcachedIndexCache(logger, client, registerer) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache_metrics.go new file mode 100644 index 000000000000..8cf75cffcb31 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache_metrics.go @@ -0,0 +1,175 @@ +package tsdb + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/util" +) + +func MustNewIndexCacheMetrics(backend string, reg *prometheus.Registry) prometheus.Collector { + switch backend { + case IndexCacheBackendInMemory: + return NewInMemoryIndexCacheMetrics(reg) + case IndexCacheBackendMemcached: + return NewMemcachedIndexCacheMetrics(reg) + default: + panic(errUnsupportedIndexCacheBackend.Error()) + } +} + +// InMemoryIndexCacheMetrics aggregates metrics exported by Thanos in-memory index cache +// and re-exports them as Cortex metrics. +type InMemoryIndexCacheMetrics struct { + reg *prometheus.Registry + + // Metrics gathered from Thanos InMemoryIndexCache + cacheItemsEvicted *prometheus.Desc + cacheItemsAdded *prometheus.Desc + cacheRequests *prometheus.Desc + cacheItemsOverflow *prometheus.Desc + cacheHits *prometheus.Desc + cacheItemsCurrentCount *prometheus.Desc + cacheItemsCurrentSize *prometheus.Desc + cacheItemsTotalCurrentSize *prometheus.Desc + + // Ignored: + // thanos_store_index_cache_max_size_bytes + // thanos_store_index_cache_max_item_size_bytes +} + +// NewInMemoryIndexCacheMetrics makes InMemoryIndexCacheMetrics. 
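+// The returned collector reads the thanos_store_index_cache_* series from the given +// registry and re-exports them under the blocks_index_cache_* names (see Collect below). 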
+func NewInMemoryIndexCacheMetrics(reg *prometheus.Registry) *InMemoryIndexCacheMetrics { + return &InMemoryIndexCacheMetrics{ + reg: reg, + + // Cache + cacheItemsEvicted: prometheus.NewDesc( + "blocks_index_cache_items_evicted_total", + "Total number of items that were evicted from the index cache.", + []string{"item_type"}, nil), + cacheItemsAdded: prometheus.NewDesc( + "blocks_index_cache_items_added_total", + "Total number of items that were added to the index cache.", + []string{"item_type"}, nil), + cacheRequests: prometheus.NewDesc( + "blocks_index_cache_requests_total", + "Total number of requests to the cache.", + []string{"item_type"}, nil), + cacheItemsOverflow: prometheus.NewDesc( + "blocks_index_cache_items_overflowed_total", + "Total number of items that could not be added to the cache due to being too big.", + []string{"item_type"}, nil), + cacheHits: prometheus.NewDesc( + "blocks_index_cache_hits_total", + "Total number of requests to the cache that were a hit.", + []string{"item_type"}, nil), + cacheItemsCurrentCount: prometheus.NewDesc( + "blocks_index_cache_items", + "Current number of items in the index cache.", + []string{"item_type"}, nil), + cacheItemsCurrentSize: prometheus.NewDesc( + "blocks_index_cache_items_size_bytes", + "Current byte size of items in the index cache.", + []string{"item_type"}, nil), + cacheItemsTotalCurrentSize: prometheus.NewDesc( + "blocks_index_cache_total_size_bytes", + "Current byte size of items (both value and key) in the index cache.", + []string{"item_type"}, nil), + } +} + +func (m *InMemoryIndexCacheMetrics) Describe(out chan<- *prometheus.Desc) { + out <- m.cacheItemsEvicted + out <- m.cacheItemsAdded + out <- m.cacheRequests + out <- m.cacheItemsOverflow + out <- m.cacheHits + out <- m.cacheItemsCurrentCount + out <- m.cacheItemsCurrentSize + out <- m.cacheItemsTotalCurrentSize +} + +func (m *InMemoryIndexCacheMetrics) Collect(out chan<- prometheus.Metric) { + data := util.BuildMetricFamiliesPerUserFromUserRegistries(map[string]*prometheus.Registry{ + "": m.reg, + }) + + data.SendSumOfCountersWithLabels(out, m.cacheItemsEvicted, "thanos_store_index_cache_items_evicted_total", "item_type") + data.SendSumOfCountersWithLabels(out, m.cacheItemsAdded, "thanos_store_index_cache_items_added_total", "item_type") + data.SendSumOfCountersWithLabels(out, m.cacheRequests, "thanos_store_index_cache_requests_total", "item_type") + data.SendSumOfCountersWithLabels(out, m.cacheItemsOverflow, "thanos_store_index_cache_items_overflowed_total", "item_type") + data.SendSumOfCountersWithLabels(out, m.cacheHits, "thanos_store_index_cache_hits_total", "item_type") + + data.SendSumOfGaugesWithLabels(out, m.cacheItemsCurrentCount, "thanos_store_index_cache_items", "item_type") + data.SendSumOfGaugesWithLabels(out, m.cacheItemsCurrentSize, "thanos_store_index_cache_items_size_bytes", "item_type") + data.SendSumOfGaugesWithLabels(out, m.cacheItemsTotalCurrentSize, "thanos_store_index_cache_total_size_bytes", "item_type") +} + +// MemcachedIndexCacheMetrics aggregates metrics exported by Thanos memcached index cache +// and re-exports them as Cortex metrics. +type MemcachedIndexCacheMetrics struct { + reg *prometheus.Registry + + // Metrics gathered from Thanos MemcachedIndexCache (and client). 
+ cacheRequests *prometheus.Desc + cacheHits *prometheus.Desc + memcachedOperations *prometheus.Desc + memcachedFailures *prometheus.Desc + memcachedDuration *prometheus.Desc + memcachedSkipped *prometheus.Desc +} + +// NewMemcachedIndexCacheMetrics makes MemcachedIndexCacheMetrics. +func NewMemcachedIndexCacheMetrics(reg *prometheus.Registry) *MemcachedIndexCacheMetrics { + return &MemcachedIndexCacheMetrics{ + reg: reg, + + cacheRequests: prometheus.NewDesc( + "blocks_index_cache_requests_total", + "Total number of requests to the cache.", + []string{"item_type"}, nil), + cacheHits: prometheus.NewDesc( + "blocks_index_cache_hits_total", + "Total number of requests to the cache that were a hit.", + []string{"item_type"}, nil), + memcachedOperations: prometheus.NewDesc( + "blocks_index_cache_memcached_operations_total", + "Total number of operations against memcached.", + []string{"operation"}, nil), + memcachedFailures: prometheus.NewDesc( + "blocks_index_cache_memcached_operation_failures_total", + "Total number of operations against memcached that failed.", + []string{"operation"}, nil), + memcachedDuration: prometheus.NewDesc( + "blocks_index_cache_memcached_operation_duration_seconds", + "Duration of operations against memcached.", + []string{"operation"}, nil), + memcachedSkipped: prometheus.NewDesc( + "blocks_index_cache_memcached_operation_skipped_total", + "Total number of operations against memcached that have been skipped.", + []string{"operation", "reason"}, nil), + } +} + +func (m *MemcachedIndexCacheMetrics) Describe(out chan<- *prometheus.Desc) { + out <- m.cacheRequests + out <- m.cacheHits + out <- m.memcachedOperations + out <- m.memcachedFailures + out <- m.memcachedDuration + out <- m.memcachedSkipped +} + +func (m *MemcachedIndexCacheMetrics) Collect(out chan<- prometheus.Metric) { + data := util.BuildMetricFamiliesPerUserFromUserRegistries(map[string]*prometheus.Registry{ + "": m.reg, + }) + + data.SendSumOfCountersWithLabels(out, m.cacheRequests, "thanos_store_index_cache_requests_total", "item_type") + data.SendSumOfCountersWithLabels(out, m.cacheHits, "thanos_store_index_cache_hits_total", "item_type") + + data.SendSumOfCountersWithLabels(out, m.memcachedOperations, "thanos_memcached_operations_total", "operation") + data.SendSumOfCountersWithLabels(out, m.memcachedFailures, "thanos_memcached_operation_failures_total", "operation") + data.SendSumOfHistogramsWithLabels(out, m.memcachedDuration, "thanos_memcached_operation_duration_seconds", "operation") + data.SendSumOfCountersWithLabels(out, m.memcachedSkipped, "thanos_memcached_operation_skipped_total", "operation", "reason") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go new file mode 100644 index 000000000000..cde7eccc66ec --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go @@ -0,0 +1,158 @@ +package tsdb + +import ( + "sync" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util" +) + +const ( + // DefaultRefCacheTTL is the default RefCache purge TTL. We use a reasonable + // value that should cover most use cases. The cache would be ineffective if + // the scrape interval of a series is greater than this TTL. 
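+ // Callers are expected to run a periodic janitor, e.g. (illustrative): + // cache.Purge(time.Now().Add(-DefaultRefCacheTTL)) 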
+ DefaultRefCacheTTL = 10 * time.Minute + + numRefCacheStripes = 128 +) + +// RefCache is a single-tenant cache mapping a labels set to its reference +// ID in TSDB, in order to be able to append samples to the TSDB head without having +// to copy write request series labels each time (because the memory buffers used to +// unmarshal the write request are reused). +type RefCache struct { + // The cache is split into stripes, each one with a dedicated lock, in + // order to reduce lock contention. + stripes [numRefCacheStripes]*refCacheStripe +} + +// refCacheStripe holds a subset of the series references for a single tenant. +type refCacheStripe struct { + refsMu sync.Mutex + refs map[model.Fingerprint][]*refCacheEntry +} + +// refCacheEntry holds a single series reference. +type refCacheEntry struct { + lbs labels.Labels + ref uint64 + touchedAt time.Time +} + +// NewRefCache makes a new RefCache. +func NewRefCache() *RefCache { + c := &RefCache{} + + // Stripes are pre-allocated so that we only read from them and no lock is required. + for i := uint8(0); i < numRefCacheStripes; i++ { + c.stripes[i] = &refCacheStripe{ + refs: map[model.Fingerprint][]*refCacheEntry{}, + } + } + + return c +} + +// Ref returns the cached series reference, and guarantees the input labels set +// is NOT retained. +func (c *RefCache) Ref(now time.Time, series labels.Labels) (uint64, bool) { + fp := client.Fingerprint(series) + stripeID := uint8(util.HashFP(fp) % numRefCacheStripes) + + return c.stripes[stripeID].ref(now, series, fp) +} + +// SetRef sets/updates the cached series reference. The input labels set IS retained. +func (c *RefCache) SetRef(now time.Time, series labels.Labels, ref uint64) { + fp := client.Fingerprint(series) + stripeID := uint8(util.HashFP(fp) % numRefCacheStripes) + + c.stripes[stripeID].setRef(now, series, fp, ref) +} + +// Purge removes expired entries from the cache. This function should be called +// periodically to avoid memory leaks. +func (c *RefCache) Purge(keepUntil time.Time) { + for s := uint8(0); s < numRefCacheStripes; s++ { + c.stripes[s].purge(keepUntil) + } +} + +func (s *refCacheStripe) ref(now time.Time, series labels.Labels, fp model.Fingerprint) (uint64, bool) { + s.refsMu.Lock() + defer s.refsMu.Unlock() + + entries, ok := s.refs[fp] + if !ok { + return 0, false + } + + for _, entry := range entries { + if labels.Equal(entry.lbs, series) { + // Get the reference and touch the timestamp before releasing the lock + ref := entry.ref + entry.touchedAt = now + + return ref, true + } + } + + return 0, false +} + +func (s *refCacheStripe) setRef(now time.Time, series labels.Labels, fp model.Fingerprint, ref uint64) { + s.refsMu.Lock() + defer s.refsMu.Unlock() + + // Check if already exists within the entries. + for _, entry := range s.refs[fp] { + if !labels.Equal(entry.lbs, series) { + continue + } + + entry.ref = ref + entry.touchedAt = now + return + } + + // The entry doesn't exist, so we have to add a new one. + s.refs[fp] = append(s.refs[fp], &refCacheEntry{lbs: series, ref: ref, touchedAt: now}) +} + +func (s *refCacheStripe) purge(keepUntil time.Time) { + s.refsMu.Lock() + defer s.refsMu.Unlock() + + for fp, entries := range s.refs { + // Since we do expect very few fingerprint collisions, we + // have an optimized implementation for the common case. 
+ if len(entries) == 1 { + if entries[0].touchedAt.Before(keepUntil) { + delete(s.refs, fp) + } + + continue + } + + // We have more entries, which means there's a collision, + // so we have to iterate over the entries. + for i := 0; i < len(entries); { + if entries[i].touchedAt.Before(keepUntil) { + entries = append(entries[:i], entries[i+1:]...) + } else { + i++ + } + } + + // Either update or delete the entries in the map + if len(entries) == 0 { + delete(s.refs, fp) + } else { + s.refs[fp] = entries + } + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/user_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/user_bucket_client.go new file mode 100644 index 000000000000..1705748f2e4c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/user_bucket_client.go @@ -0,0 +1,120 @@ +package tsdb + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/thanos-io/thanos/pkg/objstore" +) + +// UserBucketReaderClient is a wrapper around an objstore.BucketReader that reads from a user-specific subfolder. +type UserBucketReaderClient struct { + userID string + bucket objstore.BucketReader +} + +// UserBucketClient is a wrapper around an objstore.Bucket that prefixes object names with a userID +type UserBucketClient struct { + UserBucketReaderClient + bucket objstore.Bucket +} + +func NewUserBucketClient(userID string, bucket objstore.Bucket) *UserBucketClient { + return &UserBucketClient{ + UserBucketReaderClient: UserBucketReaderClient{ + userID: userID, + bucket: bucket, + }, + bucket: bucket, + } +} + +func (b *UserBucketReaderClient) fullName(name string) string { + return fmt.Sprintf("%s/%s", b.userID, name) +} + +// Close implements io.Closer +func (b *UserBucketClient) Close() error { return b.bucket.Close() } + +// Upload the contents of the reader as an object into the bucket. +func (b *UserBucketClient) Upload(ctx context.Context, name string, r io.Reader) error { + return b.bucket.Upload(ctx, b.fullName(name), r) +} + +// Delete removes the object with the given name. +func (b *UserBucketClient) Delete(ctx context.Context, name string) error { + return b.bucket.Delete(ctx, b.fullName(name)) +} + +// Name returns the bucket name for the provider. +func (b *UserBucketClient) Name() string { return b.bucket.Name() } + +// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full +// object name including the prefix of the inspected directory. +func (b *UserBucketReaderClient) Iter(ctx context.Context, dir string, f func(string) error) error { + return b.bucket.Iter(ctx, b.fullName(dir), func(s string) error { + /* + Since all objects are prefixed with the userID, we need to strip it + before passing the name to the processing function + */ + return f(strings.Join(strings.Split(s, "/")[1:], "/")) + }) +} + +// Get returns a reader for the given object name. +func (b *UserBucketReaderClient) Get(ctx context.Context, name string) (io.ReadCloser, error) { + return b.bucket.Get(ctx, b.fullName(name)) +} + +// GetRange returns a new range reader for the given object name and range. +func (b *UserBucketReaderClient) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + return b.bucket.GetRange(ctx, b.fullName(name), off, length) +} + +// Exists checks if the given object exists in the bucket. 
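+// As with the other wrapped methods, the object name is transparently prefixed: for +// tenant "user-1", Exists(ctx, "block-1/meta.json") checks "user-1/block-1/meta.json" +// in the underlying bucket (illustrative names). 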
+func (b *UserBucketReaderClient) Exists(ctx context.Context, name string) (bool, error) { + return b.bucket.Exists(ctx, b.fullName(name)) +} + +// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. +func (b *UserBucketReaderClient) IsObjNotFoundErr(err error) bool { + return b.bucket.IsObjNotFoundErr(err) +} + +// ObjectSize returns the size of the specified object. +func (b *UserBucketReaderClient) ObjectSize(ctx context.Context, name string) (uint64, error) { + return b.bucket.ObjectSize(ctx, b.fullName(name)) +} + +// ReaderWithExpectedErrs allows specifying a filter that marks certain errors as expected, so they will not increment +// the thanos_objstore_bucket_operation_failures_total metric. +func (b *UserBucketReaderClient) ReaderWithExpectedErrs(fn objstore.IsOpFailureExpectedFunc) objstore.BucketReader { + if ib, ok := b.bucket.(objstore.InstrumentedBucketReader); ok { + return &UserBucketReaderClient{ + userID: b.userID, + bucket: ib.ReaderWithExpectedErrs(fn), + } + } + + return b +} + +// WithExpectedErrs allows specifying a filter that marks certain errors as expected, so they will not increment +// the thanos_objstore_bucket_operation_failures_total metric. +func (b *UserBucketClient) WithExpectedErrs(fn objstore.IsOpFailureExpectedFunc) objstore.Bucket { + if ib, ok := b.bucket.(objstore.InstrumentedBucket); ok { + nb := ib.WithExpectedErrs(fn) + + return &UserBucketClient{ + UserBucketReaderClient: UserBucketReaderClient{ + userID: b.userID, + bucket: nb, + }, + bucket: nb, + } + } + + return b +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go new file mode 100644 index 000000000000..c13f5a2b7c4f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go @@ -0,0 +1,17 @@ +package tsdb + +import ( + "github.com/oklog/ulid" + + "github.com/cortexproject/cortex/pkg/ingester/client" +) + +// HashBlockID returns a 32-bit hash of the block ID useful for +// ring-based sharding. +func HashBlockID(id ulid.ULID) uint32 { + h := client.HashNew32() + for _, b := range id { + h = client.HashAddByte32(h, b) + } + return h +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go new file mode 100644 index 000000000000..4f1afceb47a8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_inmemory_server.go @@ -0,0 +1,56 @@ +package storegateway + +import ( + "context" + + "github.com/pkg/errors" + "github.com/prometheus/prometheus/storage" + "github.com/thanos-io/thanos/pkg/store/storepb" +) + +// BucketStoreSeriesServer is a fake in-memory gRPC server used to +// call Thanos BucketStore.Series() without having to go through the +// gRPC networking stack. +type BucketStoreSeriesServer struct { + // This field just exists to pseudo-implement the unused methods of the interface. 
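+ // Calling any method not explicitly implemented below panics, because the + // embedded interface value is nil; that is acceptable for this in-process use. 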
+ storepb.Store_SeriesServer + + ctx context.Context + + SeriesSet []*storepb.Series + Warnings storage.Warnings +} + +func NewBucketStoreSeriesServer(ctx context.Context) *BucketStoreSeriesServer { + return &BucketStoreSeriesServer{ctx: ctx} +} + +func (s *BucketStoreSeriesServer) Send(r *storepb.SeriesResponse) error { + if r.GetWarning() != "" { + s.Warnings = append(s.Warnings, errors.New(r.GetWarning())) + } + + if recvSeries := r.GetSeries(); recvSeries != nil { + // Thanos uses a pool for the chunks and may use other pools in the future. + // Given we need to retain the reference after the pooled slices are recycled, + // we need to do a copy here. We prefer to stay on the safest side at this stage + // so we do a marshal+unmarshal to copy the whole series. + recvSeriesData, err := recvSeries.Marshal() + if err != nil { + return errors.Wrap(err, "marshal received series") + } + + copiedSeries := &storepb.Series{} + if err = copiedSeries.Unmarshal(recvSeriesData); err != nil { + return errors.Wrap(err, "unmarshal received series") + } + + s.SeriesSet = append(s.SeriesSet, copiedSeries) + } + + return nil +} + +func (s *BucketStoreSeriesServer) Context() context.Context { + return s.ctx +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go new file mode 100644 index 000000000000..764c86740efd --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go @@ -0,0 +1,193 @@ +package storegateway + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/util" +) + +// BucketStoreMetrics aggregates metrics exported by Thanos Bucket Store +// and re-exports those aggregates as Cortex metrics. 
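+// +// An illustrative registration sketch (identifiers assumed, not part of this file): +// +// m := NewBucketStoreMetrics() +// prometheus.MustRegister(m) +// m.AddUserRegistry("user-1", userReg) // userReg is the per-tenant registry given to the Thanos BucketStore 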
+type BucketStoreMetrics struct { + // Maps userID -> registry + regsMu sync.Mutex + regs map[string]*prometheus.Registry + + // exported metrics, gathered from Thanos BucketStore + blockLoads *prometheus.Desc + blockLoadFailures *prometheus.Desc + blockDrops *prometheus.Desc + blockDropFailures *prometheus.Desc + blocksLoaded *prometheus.Desc + seriesDataTouched *prometheus.Desc + seriesDataFetched *prometheus.Desc + seriesDataSizeTouched *prometheus.Desc + seriesDataSizeFetched *prometheus.Desc + seriesBlocksQueried *prometheus.Desc + seriesGetAllDuration *prometheus.Desc + seriesMergeDuration *prometheus.Desc + seriesRefetches *prometheus.Desc + resultSeriesCount *prometheus.Desc + + cachedPostingsCompressions *prometheus.Desc + cachedPostingsCompressionErrors *prometheus.Desc + cachedPostingsCompressionTimeSeconds *prometheus.Desc + cachedPostingsOriginalSizeBytes *prometheus.Desc + cachedPostingsCompressedSizeBytes *prometheus.Desc +} + +func NewBucketStoreMetrics() *BucketStoreMetrics { + return &BucketStoreMetrics{ + regs: map[string]*prometheus.Registry{}, + + blockLoads: prometheus.NewDesc( + "bucket_store_block_loads_total", + "Total number of remote block loading attempts.", + nil, nil), + blockLoadFailures: prometheus.NewDesc( + "bucket_store_block_load_failures_total", + "Total number of failed remote block loading attempts.", + nil, nil), + blockDrops: prometheus.NewDesc( + "bucket_store_block_drops_total", + "Total number of local blocks that were dropped.", + nil, nil), + blockDropFailures: prometheus.NewDesc( + "bucket_store_block_drop_failures_total", + "Total number of local blocks that failed to be dropped.", + nil, nil), + blocksLoaded: prometheus.NewDesc( + "bucket_store_blocks_loaded", + "Number of currently loaded blocks.", + nil, nil), + seriesDataTouched: prometheus.NewDesc( + "bucket_store_series_data_touched", + "How many items of a data type in a block were touched for a single series request.", + []string{"data_type"}, nil), + seriesDataFetched: prometheus.NewDesc( + "bucket_store_series_data_fetched", + "How many items of a data type in a block were fetched for a single series request.", + []string{"data_type"}, nil), + seriesDataSizeTouched: prometheus.NewDesc( + "bucket_store_series_data_size_touched_bytes", + "Size of all items of a data type in a block that were touched for a single series request.", + []string{"data_type"}, nil), + seriesDataSizeFetched: prometheus.NewDesc( + "bucket_store_series_data_size_fetched_bytes", + "Size of all items of a data type in a block that were fetched for a single series request.", + []string{"data_type"}, nil), + seriesBlocksQueried: prometheus.NewDesc( + "bucket_store_series_blocks_queried", + "Number of blocks in a bucket store that were touched to satisfy a query.", + nil, nil), + + seriesGetAllDuration: prometheus.NewDesc( + "bucket_store_series_get_all_duration_seconds", + "Time it takes until all per-block prepares and preloads for a query are finished.", + nil, nil), + seriesMergeDuration: prometheus.NewDesc( + "bucket_store_series_merge_duration_seconds", + "Time it takes to merge sub-results from all queried blocks into a single result.", + nil, nil), + seriesRefetches: prometheus.NewDesc( + "bucket_store_series_refetches_total", + "Total number of cases where the built-in max series size was not enough to fetch series from index, resulting in refetch.", + nil, nil), + resultSeriesCount: prometheus.NewDesc( + "bucket_store_series_result_series", + "Number of series observed in the final result of a query.", + nil, nil), 
+ + cachedPostingsCompressions: prometheus.NewDesc( + "bucket_store_cached_postings_compressions_total", + "Number of postings compressions and decompressions when storing to index cache.", + []string{"op"}, nil), + cachedPostingsCompressionErrors: prometheus.NewDesc( + "bucket_store_cached_postings_compression_errors_total", + "Number of postings compression and decompression errors.", + []string{"op"}, nil), + cachedPostingsCompressionTimeSeconds: prometheus.NewDesc( + "bucket_store_cached_postings_compression_time_seconds", + "Time spent compressing and decompressing postings when storing to / reading from postings cache.", + []string{"op"}, nil), + cachedPostingsOriginalSizeBytes: prometheus.NewDesc( + "bucket_store_cached_postings_original_size_bytes_total", + "Original size of postings stored into cache.", + nil, nil), + cachedPostingsCompressedSizeBytes: prometheus.NewDesc( + "bucket_store_cached_postings_compressed_size_bytes_total", + "Compressed size of postings stored into cache.", + nil, nil), + } +} + +func (m *BucketStoreMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { + m.regsMu.Lock() + m.regs[user] = reg + m.regsMu.Unlock() +} + +func (m *BucketStoreMetrics) registries() map[string]*prometheus.Registry { + regs := map[string]*prometheus.Registry{} + + m.regsMu.Lock() + defer m.regsMu.Unlock() + for uid, r := range m.regs { + regs[uid] = r + } + + return regs +} + +func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { + out <- m.blockLoads + out <- m.blockLoadFailures + out <- m.blockDrops + out <- m.blockDropFailures + out <- m.blocksLoaded + out <- m.seriesDataTouched + out <- m.seriesDataFetched + out <- m.seriesDataSizeTouched + out <- m.seriesDataSizeFetched + out <- m.seriesBlocksQueried + out <- m.seriesGetAllDuration + out <- m.seriesMergeDuration + out <- m.seriesRefetches + out <- m.resultSeriesCount + + out <- m.cachedPostingsCompressions + out <- m.cachedPostingsCompressionErrors + out <- m.cachedPostingsCompressionTimeSeconds + out <- m.cachedPostingsOriginalSizeBytes + out <- m.cachedPostingsCompressedSizeBytes +} + +func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { + data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.registries()) + + data.SendSumOfCounters(out, m.blockLoads, "thanos_bucket_store_block_loads_total") + data.SendSumOfCounters(out, m.blockLoadFailures, "thanos_bucket_store_block_load_failures_total") + data.SendSumOfCounters(out, m.blockDrops, "thanos_bucket_store_block_drops_total") + data.SendSumOfCounters(out, m.blockDropFailures, "thanos_bucket_store_block_drop_failures_total") + + data.SendSumOfGauges(out, m.blocksLoaded, "thanos_bucket_store_blocks_loaded") + + data.SendSumOfSummariesWithLabels(out, m.seriesDataTouched, "thanos_bucket_store_series_data_touched", "data_type") + data.SendSumOfSummariesWithLabels(out, m.seriesDataFetched, "thanos_bucket_store_series_data_fetched", "data_type") + data.SendSumOfSummariesWithLabels(out, m.seriesDataSizeTouched, "thanos_bucket_store_series_data_size_touched_bytes", "data_type") + data.SendSumOfSummariesWithLabels(out, m.seriesDataSizeFetched, "thanos_bucket_store_series_data_size_fetched_bytes", "data_type") + data.SendSumOfSummariesWithLabels(out, m.seriesBlocksQueried, "thanos_bucket_store_series_blocks_queried") + + data.SendSumOfHistograms(out, m.seriesGetAllDuration, "thanos_bucket_store_series_get_all_duration_seconds") + data.SendSumOfHistograms(out, m.seriesMergeDuration, "thanos_bucket_store_series_merge_duration_seconds") + 
data.SendSumOfCounters(out, m.seriesRefetches, "thanos_bucket_store_series_refetches_total") + data.SendSumOfSummaries(out, m.resultSeriesCount, "thanos_bucket_store_series_result_series") + + data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressions, "thanos_bucket_store_cached_postings_compressions_total", "op") + data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionErrors, "thanos_bucket_store_cached_postings_compression_errors_total", "op") + data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionTimeSeconds, "thanos_bucket_store_cached_postings_compression_time_seconds", "op") + data.SendSumOfCountersWithLabels(out, m.cachedPostingsOriginalSizeBytes, "thanos_bucket_store_cached_postings_original_size_bytes_total") + data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressedSizeBytes, "thanos_bucket_store_cached_postings_compressed_size_bytes_total") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go new file mode 100644 index 000000000000..3deaac625442 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go @@ -0,0 +1,277 @@ +package storegateway + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/store" + storecache "github.com/thanos-io/thanos/pkg/store/cache" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/weaveworks/common/logging" + "google.golang.org/grpc/metadata" + + "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/spanlogger" +) + +// BucketStores is a multi-tenant wrapper of Thanos BucketStore. +type BucketStores struct { + logger log.Logger + cfg tsdb.Config + bucket objstore.Bucket + logLevel logging.Level + bucketStoreMetrics *BucketStoreMetrics + metaFetcherMetrics *MetadataFetcherMetrics + indexCacheMetrics prometheus.Collector + filters []block.MetadataFilter + + // Index cache shared across all tenants. + indexCache storecache.IndexCache + + // Keeps a bucket store for each tenant. + storesMu sync.RWMutex + stores map[string]*store.BucketStore + + // Metrics. + syncTimes prometheus.Histogram +} + +// NewBucketStores makes a new BucketStores. +func NewBucketStores(cfg tsdb.Config, filters []block.MetadataFilter, bucketClient objstore.Bucket, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*BucketStores, error) { + indexCacheRegistry := prometheus.NewRegistry() + + u := &BucketStores{ + logger: logger, + cfg: cfg, + bucket: bucketClient, + filters: filters, + stores: map[string]*store.BucketStore{}, + logLevel: logLevel, + bucketStoreMetrics: NewBucketStoreMetrics(), + metaFetcherMetrics: NewMetadataFetcherMetrics(), + indexCacheMetrics: tsdb.MustNewIndexCacheMetrics(cfg.BucketStore.IndexCache.Backend, indexCacheRegistry), + syncTimes: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "blocks_sync_seconds", + Help: "The total time it takes to perform a stores sync", + Buckets: []float64{0.1, 1, 10, 30, 60, 120, 300, 600, 900}, + }), + } + + // Init the index cache. 
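+ // Note: the index cache exposes its metrics to the dedicated indexCacheRegistry, which + // is re-exported through the indexCacheMetrics collector registered below. 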
+ var err error + if u.indexCache, err = tsdb.NewIndexCache(cfg.BucketStore.IndexCache, logger, indexCacheRegistry); err != nil { + return nil, errors.Wrap(err, "create index cache") + } + + if reg != nil { + reg.MustRegister(u.bucketStoreMetrics, u.metaFetcherMetrics, u.indexCacheMetrics) + } + + return u, nil +} + +// InitialSync does an initial synchronization of blocks for all users. +func (u *BucketStores) InitialSync(ctx context.Context) error { + level.Info(u.logger).Log("msg", "synchronizing TSDB blocks for all users") + + if err := u.syncUsersBlocks(ctx, func(ctx context.Context, s *store.BucketStore) error { + return s.InitialSync(ctx) + }); err != nil { + level.Warn(u.logger).Log("msg", "failed to synchronize TSDB blocks", "err", err) + return err + } + + level.Info(u.logger).Log("msg", "successfully synchronized TSDB blocks for all users") + return nil +} + +// SyncBlocks synchronizes the stores' state with the bucket for every user. +func (u *BucketStores) SyncBlocks(ctx context.Context) error { + if err := u.syncUsersBlocks(ctx, func(ctx context.Context, s *store.BucketStore) error { + return s.SyncBlocks(ctx) + }); err != nil { + return err + } + + return nil +} + +func (u *BucketStores) syncUsersBlocks(ctx context.Context, f func(context.Context, *store.BucketStore) error) error { + defer func(start time.Time) { + u.syncTimes.Observe(time.Since(start).Seconds()) + }(time.Now()) + + type job struct { + userID string + store *store.BucketStore + } + + wg := &sync.WaitGroup{} + jobs := make(chan job) + + // Create a pool of workers which will synchronize blocks. The pool size + // is limited in order to avoid concurrently syncing a large number of + // tenants in a large cluster. + for i := 0; i < u.cfg.BucketStore.TenantSyncConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for job := range jobs { + if err := f(ctx, job.store); err != nil { + level.Warn(u.logger).Log("msg", "failed to synchronize TSDB blocks for user", "user", job.userID, "err", err) + } + } + }() + } + + // Iterate the bucket, lazily create a bucket store for each new user found + // and submit a sync job for each user. + err := u.bucket.Iter(ctx, "", func(s string) error { + user := strings.TrimSuffix(s, "/") + + bs, err := u.getOrCreateStore(user) + if err != nil { + return err + } + + select { + case jobs <- job{userID: user, store: bs}: + return nil + case <-ctx.Done(): + return ctx.Err() + } + }) + + // Wait until all workers completed. + close(jobs) + wg.Wait() + + return err +} + +// Series makes a series request to the underlying user bucket store. +func (u *BucketStores) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + log, ctx := spanlogger.New(srv.Context(), "BucketStores.Series") + defer log.Span.Finish() + + userID := getUserIDFromGRPCContext(ctx) + if userID == "" { + return fmt.Errorf("no userID") + } + + store := u.getStore(userID) + if store == nil { + return nil + } + + return store.Series(req, srv) +} + +func (u *BucketStores) getStore(userID string) *store.BucketStore { + u.storesMu.RLock() + store := u.stores[userID] + u.storesMu.RUnlock() + + return store +} + +func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, error) { + // Check if the store already exists. + bs := u.getStore(userID) + if bs != nil { + return bs, nil + } + + u.storesMu.Lock() + defer u.storesMu.Unlock() + + // Check again for the store in the event it was created in-between locks. 
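+ // (classic double-checked locking: the RLock fast path above covers the common + // case, while the write lock below covers the first access for this user) 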
+ bs = u.stores[userID] + if bs != nil { + return bs, nil + } + + userLogger := util.WithUserID(userID, u.logger) + + level.Info(userLogger).Log("msg", "creating user bucket store") + + userBkt := tsdb.NewUserBucketClient(userID, u.bucket) + + fetcherReg := prometheus.NewRegistry() + fetcher, err := block.NewMetaFetcher( + userLogger, + u.cfg.BucketStore.MetaSyncConcurrency, + userBkt, + filepath.Join(u.cfg.BucketStore.SyncDir, userID), // The fetcher stores cached metas in the "meta-syncer/" subdirectory + fetcherReg, + // The input filters MUST be before the ones we create here (order matters). + append(u.filters, []block.MetadataFilter{ + block.NewConsistencyDelayMetaFilter(userLogger, u.cfg.BucketStore.ConsistencyDelay, fetcherReg), + block.NewIgnoreDeletionMarkFilter(userLogger, userBkt, u.cfg.BucketStore.IgnoreDeletionMarksDelay), + // Filters out duplicate blocks that can be formed from two or more overlapping + // blocks that fully submatch the source blocks of the older blocks. + // TODO(pracucci) can this cause troubles with the upcoming blocks sharding in the store-gateway? + block.NewDeduplicateFilter(), + }...), + nil, + ) + if err != nil { + return nil, err + } + + bucketStoreReg := prometheus.NewRegistry() + bs, err = store.NewBucketStore( + userLogger, + bucketStoreReg, + userBkt, + fetcher, + filepath.Join(u.cfg.BucketStore.SyncDir, userID), + u.indexCache, + uint64(u.cfg.BucketStore.MaxChunkPoolBytes), + u.cfg.BucketStore.MaxSampleCount, + u.cfg.BucketStore.MaxConcurrent, + u.logLevel.String() == "debug", // Turn on debug logging, if the log level is set to debug + u.cfg.BucketStore.BlockSyncConcurrency, + nil, // Do not limit timerange. + false, // No need to enable backward compatibility with Thanos pre 0.8.0 queriers + u.cfg.BucketStore.BinaryIndexHeader, + u.cfg.BucketStore.IndexCache.PostingsCompression, + u.cfg.BucketStore.PostingOffsetsInMemSampling, + ) + if err != nil { + return nil, err + } + + u.stores[userID] = bs + u.metaFetcherMetrics.AddUserRegistry(userID, fetcherReg) + u.bucketStoreMetrics.AddUserRegistry(userID, bucketStoreReg) + + return bs, nil +} + +func getUserIDFromGRPCContext(ctx context.Context) string { + meta, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "" + } + + values := meta.Get(tsdb.TenantIDExternalLabel) + if len(values) != 1 { + return "" + } + + return values[0] +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go new file mode 100644 index 000000000000..9a33f85eca76 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go @@ -0,0 +1,294 @@ +package storegateway + +import ( + "context" + "flag" + "io" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/weaveworks/common/logging" + + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv" + cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" + "github.com/cortexproject/cortex/pkg/util/services" +) + +const ( + syncReasonInitial = "initial" + syncReasonPeriodic = "periodic" + syncReasonRingChange = "ring-change" +) + +// 
Config holds the store gateway config. +type Config struct { + ShardingEnabled bool `yaml:"sharding_enabled"` + ShardingRing RingConfig `yaml:"sharding_ring"` +} + +// RegisterFlags registers the Config flags. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + cfg.ShardingRing.RegisterFlags(f) + + f.BoolVar(&cfg.ShardingEnabled, "experimental.store-gateway.sharding-enabled", false, "Shard blocks across multiple store gateway instances.") +} + +// StoreGateway is the Cortex service responsible for exposing an API over the bucket +// where blocks are stored, supporting blocks sharding and replication across a pool +// of store gateway instances (optional). +type StoreGateway struct { + services.Service + + gatewayCfg Config + storageCfg cortex_tsdb.Config + logger log.Logger + stores *BucketStores + + // Ring used for sharding blocks. + ringLifecycler *ring.BasicLifecycler + ring *ring.Ring + + // Subservices manager (ring, lifecycler) + subservices *services.Manager + subservicesWatcher *services.FailureWatcher + + bucketSync *prometheus.CounterVec +} + +func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { + var ringStore kv.Client + + bucketClient, err := createBucketClient(storageCfg, logger, reg) + if err != nil { + return nil, err + } + + if gatewayCfg.ShardingEnabled { + ringStore, err = kv.NewClient(gatewayCfg.ShardingRing.KVStore, ring.GetCodec()) + if err != nil { + return nil, errors.Wrap(err, "create KV store client") + } + } + + return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, logLevel, logger, reg) +} + +func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, bucketClient objstore.Bucket, ringStore kv.Client, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { + var err error + var filters []block.MetadataFilter + + g := &StoreGateway{ + gatewayCfg: gatewayCfg, + storageCfg: storageCfg, + logger: logger, + bucketSync: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_storegateway_bucket_sync_total", + Help: "Total number of times the bucket sync operation triggered.", + }, []string{"reason"}), + } + + // Init metrics. + g.bucketSync.WithLabelValues(syncReasonInitial) + g.bucketSync.WithLabelValues(syncReasonPeriodic) + g.bucketSync.WithLabelValues(syncReasonRingChange) + + if gatewayCfg.ShardingEnabled { + lifecyclerCfg, err := gatewayCfg.ShardingRing.ToLifecyclerConfig() + if err != nil { + return nil, errors.Wrap(err, "invalid ring lifecycler config") + } + + // Define lifecycler delegates in reverse order (last to be called defined first because they're + // chained via "next delegate").
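Since every delegate holds a reference to the next one, the wrapper declared last is the outermost and runs first. A tiny sketch of the chaining just below, with hypothetical wrapper types standing in for the ring's real TokensPersistencyDelegate and LeaveOnStoppingDelegate:

```go
package sketch

import "fmt"

// delegate is a stand-in for the ring's BasicLifecyclerDelegate.
type delegate interface{ OnRegister() }

type base struct{}

func (base) OnRegister() { fmt.Println("base delegate") }

// wrapper runs its own logic, then hands off to the next delegate.
type wrapper struct {
	name string
	next delegate
}

func (w wrapper) OnRegister() {
	fmt.Println("wrapper:", w.name)
	w.next.OnRegister()
}

func Example() {
	var d delegate = base{}
	d = wrapper{name: "leave-on-stopping", next: d}
	d = wrapper{name: "tokens-persistency", next: d}

	// Prints: tokens-persistency, then leave-on-stopping, then the base
	// delegate. The delegate defined last is the first one called.
	d.OnRegister()
}
```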
+ delegate := ring.BasicLifecyclerDelegate(g) + delegate = ring.NewLeaveOnStoppingDelegate(delegate, logger) + delegate = ring.NewTokensPersistencyDelegate(gatewayCfg.ShardingRing.TokensFilePath, ring.JOINING, delegate, logger) + + g.ringLifecycler, err = ring.NewBasicLifecycler(lifecyclerCfg, RingNameForServer, RingKey, ringStore, delegate, logger, reg) + if err != nil { + return nil, errors.Wrap(err, "create ring lifecycler") + } + + ringCfg := gatewayCfg.ShardingRing.ToRingConfig() + g.ring, err = ring.NewWithStoreClientAndStrategy(ringCfg, RingNameForServer, RingKey, ringStore, &BlocksReplicationStrategy{}) + if err != nil { + return nil, errors.Wrap(err, "create ring client") + } + + if reg != nil { + reg.MustRegister(g.ring) + } + + // Filter blocks by the shard of this store-gateway instance if the + // sharding is enabled. + filters = append(filters, NewShardingMetadataFilter(g.ring, lifecyclerCfg.Addr, logger)) + } + + var storesReg prometheus.Registerer + if reg != nil { + storesReg = prometheus.WrapRegistererWithPrefix("cortex_storegateway_", reg) + } + + g.stores, err = NewBucketStores(storageCfg, filters, bucketClient, logLevel, logger, storesReg) + if err != nil { + return nil, errors.Wrap(err, "create bucket stores") + } + + g.Service = services.NewBasicService(g.starting, g.running, g.stopping) + + return g, nil +} + +func (g *StoreGateway) starting(ctx context.Context) (err error) { + // In case this function will return error we want to unregister the instance + // from the ring. We do it ensuring dependencies are gracefully stopped if they + // were already started. + defer func() { + if err == nil || g.subservices == nil { + return + } + + if stopErr := services.StopManagerAndAwaitStopped(context.Background(), g.subservices); stopErr != nil { + level.Error(g.logger).Log("msg", "failed to gracefully stop store-gateway dependencies", "err", stopErr) + } + }() + + if g.gatewayCfg.ShardingEnabled { + // First of all we register the instance in the ring and wait + // until the lifecycler successfully started. + if g.subservices, err = services.NewManager(g.ringLifecycler, g.ring); err != nil { + return errors.Wrap(err, "unable to start store-gateway dependencies") + } + + g.subservicesWatcher = services.NewFailureWatcher() + g.subservicesWatcher.WatchManager(g.subservices) + + if err = services.StartManagerAndAwaitHealthy(ctx, g.subservices); err != nil { + return errors.Wrap(err, "unable to start store-gateway dependencies") + } + + // Wait until the ring client detected this instance in the JOINING state to + // make sure that when we'll run the initial sync we already know the tokens + // assigned to this instance. + level.Info(g.logger).Log("msg", "waiting until store-gateway is JOINING in the ring") + if err := ring.WaitInstanceState(ctx, g.ring, g.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil { + return err + } + level.Info(g.logger).Log("msg", "store-gateway is JOINING in the ring") + } + + // At this point, if sharding is enabled, the instance is registered with some tokens + // and we can run the initial synchronization. + g.bucketSync.WithLabelValues(syncReasonInitial).Inc() + if err = g.stores.InitialSync(ctx); err != nil { + return errors.Wrap(err, "initial blocks synchronization") + } + + if g.gatewayCfg.ShardingEnabled { + // Now that the initial sync is done, we should have loaded all blocks + // assigned to our shard, so we can switch to ACTIVE and start serving + // requests. 
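The JOINING-then-ACTIVE switch below completes a lifecycle driven by the services util: `starting` registers the instance and runs the initial sync, `running` loops until shutdown, and `stopping` cleans up. A skeletal sketch of that wiring; the callback signatures match this diff, while the bodies are reduced to stubs:

```go
package sketch

import (
	"context"

	"github.com/cortexproject/cortex/pkg/util/services"
)

// gateway mirrors the shape of StoreGateway: three callbacks are handed
// to NewBasicService, which invokes them in order over the service's life.
type gateway struct {
	services.Service
}

func newGateway() *gateway {
	g := &gateway{}
	g.Service = services.NewBasicService(g.starting, g.running, g.stopping)
	return g
}

// starting runs once at startup; returning an error fails the service.
func (g *gateway) starting(ctx context.Context) error { return nil }

// running blocks until the service is asked to stop.
func (g *gateway) running(ctx context.Context) error {
	<-ctx.Done()
	return nil
}

// stopping receives the failure reason (nil on a clean shutdown).
func (g *gateway) stopping(_ error) error { return nil }
```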
+ if err = g.ringLifecycler.ChangeState(ctx, ring.ACTIVE); err != nil { + return errors.Wrapf(err, "switch instance to %s in the ring", ring.ACTIVE) + } + } + + return nil +} + +func (g *StoreGateway) running(ctx context.Context) error { + var ringTickerChan <-chan time.Time + var ringLastTokens ring.TokenDescs + + syncTicker := time.NewTicker(g.storageCfg.BucketStore.SyncInterval) + defer syncTicker.Stop() + + if g.gatewayCfg.ShardingEnabled { + ringLastTokens = g.ring.GetAllTokens(ring.BlocksSync) + ringTicker := time.NewTicker(g.gatewayCfg.ShardingRing.RingCheckPeriod) + defer ringTicker.Stop() + ringTickerChan = ringTicker.C + } + + for { + select { + case <-syncTicker.C: + g.syncStores(ctx, syncReasonPeriodic) + case <-ringTickerChan: + currTokens := g.ring.GetAllTokens(ring.BlocksSync) + if !currTokens.Equals(ringLastTokens) { + ringLastTokens = currTokens + g.syncStores(ctx, syncReasonRingChange) + } + case <-ctx.Done(): + return nil + case err := <-g.subservicesWatcher.Chan(): + return errors.Wrap(err, "store gateway subservice failed") + } + } +} + +func (g *StoreGateway) stopping(_ error) error { + if g.subservices != nil { + return services.StopManagerAndAwaitStopped(context.Background(), g.subservices) + } + return nil +} + +func (g *StoreGateway) syncStores(ctx context.Context, reason string) { + level.Info(g.logger).Log("msg", "synchronizing TSDB blocks for all users", "reason", reason) + g.bucketSync.WithLabelValues(reason).Inc() + + if err := g.stores.SyncBlocks(ctx); err != nil && err != io.EOF { + level.Warn(g.logger).Log("msg", "failed to synchronize TSDB blocks", "reason", reason, "err", err) + } else { + level.Info(g.logger).Log("msg", "successfully synchronized TSDB blocks for all users", "reason", reason) + } +} + +func (g *StoreGateway) Series(req *storepb.SeriesRequest, srv storegatewaypb.StoreGateway_SeriesServer) error { + return g.stores.Series(req, srv) +} + +func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { + // When we initialize the store-gateway instance in the ring we want to start from + // a clean situation, so whatever is the state we set it JOINING, while we keep existing + // tokens (if any) or the ones loaded from file. + var tokens []uint32 + if instanceExists { + tokens = instanceDesc.GetTokens() + } + + _, takenTokens := ringDesc.TokensFor(instanceID) + newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens) + + // Tokens sorting will be enforced by the parent caller. + tokens = append(tokens, newTokens...) 
+ + return ring.JOINING, tokens +} + +func (g *StoreGateway) OnRingInstanceTokens(_ *ring.BasicLifecycler, tokens ring.Tokens) {} +func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} + +func createBucketClient(cfg cortex_tsdb.Config, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { + bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg, "cortex-bucket-stores", logger) + if err != nil { + return nil, errors.Wrap(err, "create bucket client") + } + + if reg != nil { + bucketClient = objstore.BucketWithMetrics( /* bucket label value */ "", bucketClient, prometheus.WrapRegistererWithPrefix("cortex_storegateway_", reg)) + } + + return bucketClient, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go new file mode 100644 index 000000000000..01d466b4558a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go @@ -0,0 +1,53 @@ +package storegateway + +import ( + "net/http" + "text/template" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + statusPageTemplate = template.Must(template.New("main").Parse(` + + + + + Cortex Store Gateway Ring + + +

+			<h1>Cortex Store Gateway Ring</h1>
+			<p>{{ .Message }}</p>

+ + `)) +) + +func writeMessage(w http.ResponseWriter, message string) { + w.WriteHeader(http.StatusOK) + err := statusPageTemplate.Execute(w, struct { + Message string + }{Message: message}) + + if err != nil { + level.Error(util.Logger).Log("msg", "unable to serve store gateway ring page", "err", err) + } +} + +func (c *StoreGateway) RingHandler(w http.ResponseWriter, req *http.Request) { + if !c.gatewayCfg.ShardingEnabled { + writeMessage(w, "Store gateway has no ring because sharding is disabled.") + return + } + + if c.State() != services.Running { + // we cannot read the ring before the store gateway is in Running state, + // because that would lead to race condition. + writeMessage(w, "Store gateway is not running yet.") + return + } + + c.ring.ServeHTTP(w, req) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go new file mode 100644 index 000000000000..48e96d08ad53 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go @@ -0,0 +1,107 @@ +package storegateway + +import ( + "flag" + "fmt" + "os" + "time" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" +) + +const ( + // RingKey is the key under which we store the store gateways ring in the KVStore. + RingKey = "store-gateway" + + // RingNameForServer is the name of the ring used by the store gateway server. + RingNameForServer = "store-gateway" + + // RingNameForClient is the name of the ring used by the store gateway client (we need + // a different name to avoid clashing Prometheus metrics when running in single-binary). + RingNameForClient = "store-gateway-client" + + // We use a safe default instead of exposing to config option to the user + // in order to simplify the config. + RingNumTokens = 512 +) + +// RingConfig masks the ring lifecycler config which contains +// many options not really required by the store gateways ring. This config +// is used to strip down the config to the minimum, and avoid confusion +// to the user. 
+type RingConfig struct { + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` + TokensFilePath string `yaml:"tokens_file_path"` + + // Instance details + InstanceID string `yaml:"instance_id" doc:"hidden"` + InstanceInterfaceNames []string `yaml:"instance_interface_names" doc:"hidden"` + InstancePort int `yaml:"instance_port" doc:"hidden"` + InstanceAddr string `yaml:"instance_addr" doc:"hidden"` + + // Injected internally + ListenPort int `yaml:"-"` + RingCheckPeriod time.Duration `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { + hostname, err := os.Hostname() + if err != nil { + level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + os.Exit(1) + } + + // Ring flags + cfg.KVStore.RegisterFlagsWithPrefix("experimental.store-gateway.ring.", "collectors/", f) + f.DurationVar(&cfg.HeartbeatPeriod, "experimental.store-gateway.ring.heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, "experimental.store-gateway.ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which store gateways are considered unhealthy within the ring.") + f.IntVar(&cfg.ReplicationFactor, "experimental.store-gateway.replication-factor", 3, "The replication factor to use when sharding blocks.") + f.StringVar(&cfg.TokensFilePath, "experimental.store-gateway.tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") + + // Instance flags + cfg.InstanceInterfaceNames = []string{"eth0", "en0"} + f.Var((*flagext.Strings)(&cfg.InstanceInterfaceNames), "experimental.store-gateway.ring.instance-interface", "Name of network interface to read address from.") + f.StringVar(&cfg.InstanceAddr, "experimental.store-gateway.ring.instance-addr", "", "IP address to advertise in the ring.") + f.IntVar(&cfg.InstancePort, "experimental.store-gateway.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") + f.StringVar(&cfg.InstanceID, "experimental.store-gateway.ring.instance-id", hostname, "Instance ID to register in the ring.") + + // Defaults for internal settings. 
+ cfg.RingCheckPeriod = 5 * time.Second +} + +func (cfg *RingConfig) ToRingConfig() ring.Config { + rc := ring.Config{} + flagext.DefaultValues(&rc) + + rc.KVStore = cfg.KVStore + rc.HeartbeatTimeout = cfg.HeartbeatTimeout + rc.ReplicationFactor = cfg.ReplicationFactor + + return rc +} + +func (cfg *RingConfig) ToLifecyclerConfig() (ring.BasicLifecyclerConfig, error) { + instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames) + if err != nil { + return ring.BasicLifecyclerConfig{}, err + } + + instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) + + return ring.BasicLifecyclerConfig{ + ID: cfg.InstanceID, + Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), + HeartbeatPeriod: cfg.HeartbeatPeriod, + TokensObservePeriod: 0, + NumTokens: RingNumTokens, + }, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go new file mode 100644 index 000000000000..a8e8f6b40c91 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go @@ -0,0 +1,92 @@ +package storegateway + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/util" +) + +// This struct aggregates metrics exported by Thanos MetaFetcher +// and re-exports those aggregates as Cortex metrics. +type MetadataFetcherMetrics struct { + // Maps userID -> registry + regsMu sync.Mutex + regs map[string]*prometheus.Registry + + // Exported metrics, gathered from Thanos MetaFetcher + syncs *prometheus.Desc + syncFailures *prometheus.Desc + syncDuration *prometheus.Desc + syncConsistencyDelay *prometheus.Desc + synced *prometheus.Desc + + // Ignored: + // blocks_meta_modified + // blocks_meta_base_syncs_total +} + +func NewMetadataFetcherMetrics() *MetadataFetcherMetrics { + return &MetadataFetcherMetrics{ + regs: map[string]*prometheus.Registry{}, + + syncs: prometheus.NewDesc( + "blocks_meta_syncs_total", + "Total blocks metadata synchronization attempts", + nil, nil), + syncFailures: prometheus.NewDesc( + "blocks_meta_sync_failures_total", + "Total blocks metadata synchronization failures", + nil, nil), + syncDuration: prometheus.NewDesc( + "blocks_meta_sync_duration_seconds", + "Duration of the blocks metadata synchronization in seconds", + nil, nil), + syncConsistencyDelay: prometheus.NewDesc( + "blocks_meta_sync_consistency_delay_seconds", + "Configured consistency delay in seconds.", + nil, nil), + synced: prometheus.NewDesc( + "blocks_meta_synced", + "Reflects current state of synced blocks (over all tenants).", + []string{"state"}, nil), + } +} + +func (m *MetadataFetcherMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { + m.regsMu.Lock() + m.regs[user] = reg + m.regsMu.Unlock() +} + +func (m *MetadataFetcherMetrics) registries() map[string]*prometheus.Registry { + regs := map[string]*prometheus.Registry{} + + m.regsMu.Lock() + defer m.regsMu.Unlock() + for uid, r := range m.regs { + regs[uid] = r + } + + return regs +} + +func (m *MetadataFetcherMetrics) Describe(out chan<- *prometheus.Desc) { + + out <- m.syncs + out <- m.syncFailures + out <- m.syncDuration + out <- m.syncConsistencyDelay + out <- m.synced +} + +func (m *MetadataFetcherMetrics) Collect(out chan<- prometheus.Metric) { + data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.registries()) + + data.SendSumOfCounters(out, m.syncs, "blocks_meta_syncs_total") + 
data.SendSumOfCounters(out, m.syncFailures, "blocks_meta_sync_failures_total") + data.SendSumOfHistograms(out, m.syncDuration, "blocks_meta_sync_duration_seconds") + data.SendMaxOfGauges(out, m.syncConsistencyDelay, "consistency_delay_seconds") + data.SendSumOfGaugesWithLabels(out, m.synced, "blocks_meta_synced", "state") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_filters.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_filters.go new file mode 100644 index 000000000000..35e68c2ab65d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_filters.go @@ -0,0 +1,60 @@ +package storegateway + +import ( + "context" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/extprom" + + "github.com/cortexproject/cortex/pkg/ring" + cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" +) + +const ( + shardExcludedMeta = "shard-excluded" +) + +// ShardingMetadataFilter represents struct that allows sharding using the ring. +// Not go-routine safe. +type ShardingMetadataFilter struct { + r *ring.Ring + instanceAddr string + logger log.Logger +} + +// NewShardingMetadataFilter creates ShardingMetadataFilter. +func NewShardingMetadataFilter(r *ring.Ring, instanceAddr string, logger log.Logger) *ShardingMetadataFilter { + return &ShardingMetadataFilter{ + r: r, + instanceAddr: instanceAddr, + logger: logger, + } +} + +// Filter filters out blocks not included within the current shard. +func (f *ShardingMetadataFilter) Filter(_ context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec, _ bool) error { + // Buffer internally used by the ring (give extra room for a JOINING + LEAVING instance). + buf := make([]ring.IngesterDesc, 0, f.r.ReplicationFactor()+2) + + for blockID := range metas { + key := cortex_tsdb.HashBlockID(blockID) + set, err := f.r.Get(key, ring.BlocksSync, buf) + + // If there are no healthy instances in the replication set or + // the replication set for this block doesn't include this instance + // then we filter it out. + if err != nil || !set.Includes(f.instanceAddr) { + if err != nil { + level.Warn(f.logger).Log("msg", "failed to get replication set for block", "block", blockID.String(), "err", err) + } + + synced.WithLabelValues(shardExcludedMeta).Inc() + delete(metas, blockID) + } + } + + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go new file mode 100644 index 000000000000..25851081e7e8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go @@ -0,0 +1,45 @@ +package storegateway + +import ( + "errors" + "time" + + "github.com/cortexproject/cortex/pkg/ring" +) + +type BlocksReplicationStrategy struct{} + +func (s *BlocksReplicationStrategy) Filter(instances []ring.IngesterDesc, op ring.Operation, replicationFactor int, heartbeatTimeout time.Duration) ([]ring.IngesterDesc, int, error) { + // Filter out unhealthy instances. + for i := 0; i < len(instances); { + if instances[i].IsHealthy(op, heartbeatTimeout) { + i++ + } else { + instances = append(instances[:i], instances[i+1:]...) + } + } + + // For the store-gateway use case we need that a block is loaded at least on + // 1 instance, no matter what is the replication factor set (no quorum logic). 
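In other words, any single healthy instance in the set suffices, so up to len(instances)-1 failures are tolerated. For contrast, a hedged sketch next to the usual majority-quorum rule (the quorum formula is shown for comparison only and is not taken from this diff):

```go
package sketch

// maxFailuresNoQuorum mirrors the strategy above: one healthy instance
// is enough to serve blocks, so with 3 healthy instances up to 2 may fail.
func maxFailuresNoQuorum(healthyInstances int) int {
	return healthyInstances - 1
}

// maxFailuresQuorum is the usual majority rule, for comparison: with a
// replication factor of 3, only 1 failure is tolerated.
func maxFailuresQuorum(replicationFactor int) int {
	return replicationFactor / 2
}
```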
+ if len(instances) == 0 { + return nil, 0, errors.New("no healthy store-gateway instance found for the replication set") + } + + maxFailures := len(instances) - 1 + return instances, maxFailures, nil +} + +func (s *BlocksReplicationStrategy) ShouldExtendReplicaSet(instance ring.IngesterDesc, op ring.Operation) bool { + switch op { + case ring.BlocksSync: + // If the instance is JOINING or LEAVING we should extend the replica set: + // - JOINING: the previous replica set should be kept while an instance is JOINING + // - LEAVING: the instance is going to be decommissioned soon so we need to include + // another replica in the set + return instance.GetState() == ring.JOINING || instance.GetState() == ring.LEAVING + case ring.BlocksRead: + return false + default: + return false + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go new file mode 100644 index 000000000000..95c4c897f0af --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.pb.go @@ -0,0 +1,166 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gateway.proto + +package storegatewaypb + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + storepb "github.com/thanos-io/thanos/pkg/store/storepb" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } + +var fileDescriptor_f1a937782ebbded5 = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x4f, 0x2c, 0x49, + 0x2d, 0x4f, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4, + 0xcc, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x4b, 0x32, 0x12, 0xf3, + 0xf2, 0x8b, 0x75, 0x33, 0xf3, 0xa1, 0x2c, 0xfd, 0x82, 0xec, 0x74, 0xfd, 0xe2, 0x92, 0xfc, 0xa2, + 0x54, 0x08, 0x59, 0x90, 0xa4, 0x5f, 0x54, 0x90, 0x0c, 0x31, 0xc3, 0xc8, 0x93, 0x8b, 0x27, 0x18, + 0x24, 0xe8, 0x0e, 0x31, 0x4a, 0xc8, 0x92, 0x8b, 0x2d, 0x38, 0xb5, 0x28, 0x33, 0xb5, 0x58, 0x48, + 0x54, 0x0f, 0xa2, 0x5d, 0x0f, 0xc2, 0x0f, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0x12, 0x43, + 0x17, 0x2e, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x35, 0x60, 0x74, 0x72, 0xb9, 0xf0, 0x50, 0x8e, 0xe1, + 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, + 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, + 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, + 0x2c, 0xc7, 0x10, 0xc5, 0x07, 0x76, 0x13, 0xdc, 0x27, 0x49, 0x6c, 0x60, 0x77, 0x19, 0x03, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xc5, 0x38, 0xd0, 0xf6, 0xec, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// StoreGatewayClient is the client API for StoreGateway service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type StoreGatewayClient interface { + // Series streams each Series for given label matchers and time range. + // + // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain + // partition of the single series, but once a new series is started to be streamed it means that no more data will + // be sent for previous one. + // + // Series are sorted. + Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) +} + +type storeGatewayClient struct { + cc *grpc.ClientConn +} + +func NewStoreGatewayClient(cc *grpc.ClientConn) StoreGatewayClient { + return &storeGatewayClient{cc} +} + +func (c *storeGatewayClient) Series(ctx context.Context, in *storepb.SeriesRequest, opts ...grpc.CallOption) (StoreGateway_SeriesClient, error) { + stream, err := c.cc.NewStream(ctx, &_StoreGateway_serviceDesc.Streams[0], "/gatewaypb.StoreGateway/Series", opts...) + if err != nil { + return nil, err + } + x := &storeGatewaySeriesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type StoreGateway_SeriesClient interface { + Recv() (*storepb.SeriesResponse, error) + grpc.ClientStream +} + +type storeGatewaySeriesClient struct { + grpc.ClientStream +} + +func (x *storeGatewaySeriesClient) Recv() (*storepb.SeriesResponse, error) { + m := new(storepb.SeriesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// StoreGatewayServer is the server API for StoreGateway service. +type StoreGatewayServer interface { + // Series streams each Series for given label matchers and time range. + // + // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain + // partition of the single series, but once a new series is started to be streamed it means that no more data will + // be sent for previous one. + // + // Series are sorted. + Series(*storepb.SeriesRequest, StoreGateway_SeriesServer) error +} + +// UnimplementedStoreGatewayServer can be embedded to have forward compatible implementations. 
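On the wire this is a plain gRPC server-stream: a caller invokes Series once and then drains response frames with Recv until io.EOF. A sketch of such a caller, using only the generated API above (the connection setup and request contents are assumptions):

```go
package sketch

import (
	"context"
	"io"

	"github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb"
	"github.com/thanos-io/thanos/pkg/store/storepb"
	"google.golang.org/grpc"
)

func readSeries(ctx context.Context, conn *grpc.ClientConn, req *storepb.SeriesRequest) error {
	client := storegatewaypb.NewStoreGatewayClient(conn)

	stream, err := client.Series(ctx, req)
	if err != nil {
		return err
	}

	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// The server closed the stream: all series have been sent.
			return nil
		}
		if err != nil {
			return err
		}

		// A single series may be split across frames, but series are
		// sorted and never interleaved, so frames can be processed in order.
		_ = resp
	}
}
```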
+type UnimplementedStoreGatewayServer struct { +} + +func (*UnimplementedStoreGatewayServer) Series(req *storepb.SeriesRequest, srv StoreGateway_SeriesServer) error { + return status.Errorf(codes.Unimplemented, "method Series not implemented") +} + +func RegisterStoreGatewayServer(s *grpc.Server, srv StoreGatewayServer) { + s.RegisterService(&_StoreGateway_serviceDesc, srv) +} + +func _StoreGateway_Series_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(storepb.SeriesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StoreGatewayServer).Series(m, &storeGatewaySeriesServer{stream}) +} + +type StoreGateway_SeriesServer interface { + Send(*storepb.SeriesResponse) error + grpc.ServerStream +} + +type storeGatewaySeriesServer struct { + grpc.ServerStream +} + +func (x *storeGatewaySeriesServer) Send(m *storepb.SeriesResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _StoreGateway_serviceDesc = grpc.ServiceDesc{ + ServiceName: "gatewaypb.StoreGateway", + HandlerType: (*StoreGatewayServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Series", + Handler: _StoreGateway_Series_Handler, + ServerStreams: true, + }, + }, + Metadata: "gateway.proto", +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto new file mode 100644 index 000000000000..fdde78fe87e1 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb/gateway.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package gatewaypb; + +import "github.com/thanos-io/thanos/pkg/store/storepb/rpc.proto"; + +option go_package = "storegatewaypb"; + +service StoreGateway { + // Series streams each Series for given label matchers and time range. + // + // Series should strictly stream full series after series, optionally split by time. This means that a single frame can contain + // partition of the single series, but once a new series is started to be streamed it means that no more data will + // be sent for previous one. + // + // Series are sorted. + rpc Series(thanos.SeriesRequest) returns (stream thanos.SeriesResponse); +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go b/vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go new file mode 100644 index 000000000000..9bd7e4545476 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/chunkcompat/compat.go @@ -0,0 +1,100 @@ +package chunkcompat + +import ( + "bytes" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/chunk" + prom_chunk "github.com/cortexproject/cortex/pkg/chunk/encoding" + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util" +) + +// StreamsToMatrix converts a slice of QueryStreamResponse to a model.Matrix. +func StreamsToMatrix(from, through model.Time, responses []*client.QueryStreamResponse) (model.Matrix, error) { + result := model.Matrix{} + for _, response := range responses { + series, err := SeriesChunksToMatrix(from, through, response.Chunkseries) + if err != nil { + return nil, err + } + + result = append(result, series...) + } + return result, nil +} + +// SeriesChunksToMatrix converts slice of []client.TimeSeriesChunk to a model.Matrix. 
+func SeriesChunksToMatrix(from, through model.Time, serieses []client.TimeSeriesChunk) (model.Matrix, error) { + if serieses == nil { + return nil, nil + } + + result := model.Matrix{} + for _, series := range serieses { + metric := client.FromLabelAdaptersToMetric(series.Labels) + chunks, err := FromChunks("", client.FromLabelAdaptersToLabels(series.Labels), series.Chunks) + if err != nil { + return nil, err + } + + samples := []model.SamplePair{} + for _, chunk := range chunks { + ss, err := chunk.Samples(from, through) + if err != nil { + return nil, err + } + samples = util.MergeSampleSets(samples, ss) + } + + result = append(result, &model.SampleStream{ + Metric: metric, + Values: samples, + }) + } + return result, nil +} + +// FromChunks converts []client.Chunk to []chunk.Chunk. +func FromChunks(userID string, metric labels.Labels, in []client.Chunk) ([]chunk.Chunk, error) { + out := make([]chunk.Chunk, 0, len(in)) + for _, i := range in { + o, err := prom_chunk.NewForEncoding(prom_chunk.Encoding(byte(i.Encoding))) + if err != nil { + return nil, err + } + + if err := o.UnmarshalFromBuf(i.Data); err != nil { + return nil, err + } + + firstTime, lastTime := model.Time(i.StartTimestampMs), model.Time(i.EndTimestampMs) + // As the lifetime of this chunk is scopes to this request, we don't need + // to supply a fingerprint. + out = append(out, chunk.NewChunk(userID, 0, metric, o, firstTime, lastTime)) + } + return out, nil +} + +// ToChunks converts []chunk.Chunk to []client.Chunk. +func ToChunks(in []chunk.Chunk) ([]client.Chunk, error) { + out := make([]client.Chunk, 0, len(in)) + for _, i := range in { + wireChunk := client.Chunk{ + StartTimestampMs: int64(i.From), + EndTimestampMs: int64(i.Through), + Encoding: int32(i.Data.Encoding()), + } + + buf := bytes.NewBuffer(make([]byte, 0, prom_chunk.ChunkLen)) + if err := i.Data.Marshal(buf); err != nil { + return nil, err + } + + wireChunk.Data = buf.Bytes() + out = append(out, wireChunk) + } + return out, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpc/healthcheck/health_check.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpc/healthcheck/health_check.go new file mode 100644 index 000000000000..6b60a6d47b70 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpc/healthcheck/health_check.go @@ -0,0 +1,53 @@ +package healthcheck + +import ( + "context" + + "github.com/gogo/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/cortexproject/cortex/pkg/util/services" +) + +// HealthCheck fulfills the grpc_health_v1.HealthServer interface by ensuring +// the services being managed by the provided service manager are healthy. +type HealthCheck struct { + sm *services.Manager +} + +// New returns a new HealthCheck for the provided service manager. +func New(sm *services.Manager) *HealthCheck { + return &HealthCheck{ + sm: sm, + } +} + +// Check implements the grpc healthcheck. +func (h *HealthCheck) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) { + if !h.isHealthy() { + return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_NOT_SERVING}, nil + } + + return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil +} + +// Watch implements the grpc healthcheck. 
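This type satisfies the standard grpc_health_v1.HealthServer interface, so wiring it into a server is a single registration call. A hedged sketch (how the service manager is built is an assumption, not shown in this diff):

```go
package sketch

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"

	"github.com/cortexproject/cortex/pkg/util/grpc/healthcheck"
	"github.com/cortexproject/cortex/pkg/util/services"
)

func registerHealthCheck(server *grpc.Server, sm *services.Manager) {
	// Exposes the standard gRPC health endpoint, backed by the state of
	// all services managed by sm; probes can query it via grpc_health_v1.
	grpc_health_v1.RegisterHealthServer(server, healthcheck.New(sm))
}
```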
+func (h *HealthCheck) Watch(_ *grpc_health_v1.HealthCheckRequest, _ grpc_health_v1.Health_WatchServer) error { + return status.Error(codes.Unimplemented, "Watching is not supported") +} + +// isHealthy returns whether the Cortex instance should be considered healthy. +func (h *HealthCheck) isHealthy() bool { + states := h.sm.ServicesByState() + + // Given this is an health check endpoint for the whole instance, we should consider + // it healthy after all services have been started (running) and until all + // services are terminated. Some services, like ingesters, are still + // fully functioning while stopping. + if len(states[services.New]) > 0 || len(states[services.Starting]) > 0 || len(states[services.Failed]) > 0 { + return false + } + + return len(states[services.Running]) > 0 || len(states[services.Stopping]) > 0 +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go b/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go new file mode 100644 index 000000000000..f8eeec5d516e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go @@ -0,0 +1,43 @@ +package push + +import ( + "context" + "net/http" + + "github.com/go-kit/kit/log/level" + "github.com/weaveworks/common/httpgrpc" + + "github.com/cortexproject/cortex/pkg/distributor" + "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util" +) + +// Handler is a http.Handler which accepts WriteRequests. +func Handler(cfg distributor.Config, push func(context.Context, *client.WriteRequest) (*client.WriteResponse, error)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Write-Version")) + var req client.PreallocWriteRequest + _, err := util.ParseProtoReader(r.Context(), r.Body, int(r.ContentLength), cfg.MaxRecvMsgSize, &req, compressionType) + logger := util.WithContext(r.Context(), util.Logger) + if err != nil { + level.Error(logger).Log("err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if req.Source == 0 { + req.Source = client.API + } + + if _, err := push(r.Context(), &req.WriteRequest); err != nil { + resp, ok := httpgrpc.HTTPResponseFromError(err) + if !ok { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if resp.GetCode() != 202 { + level.Error(logger).Log("msg", "push error", "err", err) + } + http.Error(w, string(resp.Body), int(resp.Code)) + } + }) +} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml new file mode 100644 index 000000000000..841c4281e23d --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + patch: + default: + target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore new file mode 100644 index 000000000000..87c3bd3e66e0 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +coverage.txt +*.cov +.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml new file mode 100644 index 000000000000..76af8ab1c876 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 40 + maligned: + 
suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits + # scopelint is useful, but also reports false positives + # that unfortunately can't be disabled. So we disable the + # linter rather than changing code that works. + # see: https://github.com/kyoh86/scopelint/issues/4 + - scopelint diff --git a/vendor/github.com/go-openapi/analysis/.travis.yml b/vendor/github.com/go-openapi/analysis/.travis.yml new file mode 100644 index 000000000000..7ecf865c21c4 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md new file mode 100644 index 000000000000..3724bfc48ea1 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -0,0 +1,9 @@ +# OpenAPI initiative analysis [![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/analysis.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) + + +A foundational library to analyze an OAI specification document for easier reasoning about the content. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go new file mode 100644 index 000000000000..4d98718c4e62 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/analyzer.go @@ -0,0 +1,970 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + slashpath "path" + "strconv" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +type referenceAnalysis struct { + schemas map[string]spec.Ref + responses map[string]spec.Ref + parameters map[string]spec.Ref + items map[string]spec.Ref + headerItems map[string]spec.Ref + parameterItems map[string]spec.Ref + allRefs map[string]spec.Ref + pathItems map[string]spec.Ref +} + +func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { + r.allRefs["#"+key] = ref +} + +func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { + r.items["#"+key] = items.Ref + r.addRef(key, items.Ref) + if location == "header" { + // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas + // and $ref are not supported here. However it is possible to analyze this. 
+ r.headerItems["#"+key] = items.Ref + } else { + r.parameterItems["#"+key] = items.Ref + } +} + +func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { + r.schemas["#"+key] = ref.Schema.Ref + r.addRef(key, ref.Schema.Ref) +} + +func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { + r.responses["#"+key] = resp.Ref + r.addRef(key, resp.Ref) +} + +func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { + r.parameters["#"+key] = param.Ref + r.addRef(key, param.Ref) +} + +func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { + r.pathItems["#"+key] = pathItem.Ref + r.addRef(key, pathItem.Ref) +} + +type patternAnalysis struct { + parameters map[string]string + headers map[string]string + items map[string]string + schemas map[string]string + allPatterns map[string]string +} + +func (p *patternAnalysis) addPattern(key, pattern string) { + p.allPatterns["#"+key] = pattern +} + +func (p *patternAnalysis) addParameterPattern(key, pattern string) { + p.parameters["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addHeaderPattern(key, pattern string) { + p.headers["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addItemsPattern(key, pattern string) { + p.items["#"+key] = pattern + p.addPattern(key, pattern) +} + +func (p *patternAnalysis) addSchemaPattern(key, pattern string) { + p.schemas["#"+key] = pattern + p.addPattern(key, pattern) +} + +type enumAnalysis struct { + parameters map[string][]interface{} + headers map[string][]interface{} + items map[string][]interface{} + schemas map[string][]interface{} + allEnums map[string][]interface{} +} + +func (p *enumAnalysis) addEnum(key string, enum []interface{}) { + p.allEnums["#"+key] = enum +} + +func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { + p.parameters["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { + p.headers["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { + p.items["#"+key] = enum + p.addEnum(key, enum) +} + +func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { + p.schemas["#"+key] = enum + p.addEnum(key, enum) +} + +// New takes a swagger spec object and returns an analyzed spec document. +// The analyzed document contains a number of indices that make it easier to +// reason about semantics of a swagger specification for use in code generation +// or validation etc. +func New(doc *spec.Swagger) *Spec { + a := &Spec{ + spec: doc, + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + a.reset() + a.initialize() + return a +} + +// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry +// with a bunch of utility methods to act on the information in the spec. 
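// A minimal usage sketch (editor's illustration; rawJSON and the calls printed
// below are only meant to show the shape of the API):
//
//	var doc spec.Swagger
//	if err := json.Unmarshal(rawJSON, &doc); err != nil {
//		log.Fatal(err)
//	}
//	an := analysis.New(&doc)
//	fmt.Println(an.RequiredConsumes()) // distinct consumes media types
//	fmt.Println(an.OperationIDs())     // operation ids, or "METHOD /path" when no id is set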
+type Spec struct { + spec *spec.Swagger + consumes map[string]struct{} + produces map[string]struct{} + authSchemes map[string]struct{} + operations map[string]map[string]*spec.Operation + references referenceAnalysis + patterns patternAnalysis + enums enumAnalysis + allSchemas map[string]SchemaRef + allOfs map[string]SchemaRef +} + +func (s *Spec) reset() { + s.consumes = make(map[string]struct{}, 150) + s.produces = make(map[string]struct{}, 150) + s.authSchemes = make(map[string]struct{}, 150) + s.operations = make(map[string]map[string]*spec.Operation, 150) + s.allSchemas = make(map[string]SchemaRef, 150) + s.allOfs = make(map[string]SchemaRef, 150) + s.references.schemas = make(map[string]spec.Ref, 150) + s.references.pathItems = make(map[string]spec.Ref, 150) + s.references.responses = make(map[string]spec.Ref, 150) + s.references.parameters = make(map[string]spec.Ref, 150) + s.references.items = make(map[string]spec.Ref, 150) + s.references.headerItems = make(map[string]spec.Ref, 150) + s.references.parameterItems = make(map[string]spec.Ref, 150) + s.references.allRefs = make(map[string]spec.Ref, 150) + s.patterns.parameters = make(map[string]string, 150) + s.patterns.headers = make(map[string]string, 150) + s.patterns.items = make(map[string]string, 150) + s.patterns.schemas = make(map[string]string, 150) + s.patterns.allPatterns = make(map[string]string, 150) + s.enums.parameters = make(map[string][]interface{}, 150) + s.enums.headers = make(map[string][]interface{}, 150) + s.enums.items = make(map[string][]interface{}, 150) + s.enums.schemas = make(map[string][]interface{}, 150) + s.enums.allEnums = make(map[string][]interface{}, 150) +} + +func (s *Spec) reload() { + s.reset() + s.initialize() +} + +func (s *Spec) initialize() { + for _, c := range s.spec.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range s.spec.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range s.spec.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + for path, pathItem := range s.AllPaths() { + s.analyzeOperations(path, &pathItem) + } + + for name, parameter := range s.spec.Parameters { + refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) + if parameter.Items != nil { + s.analyzeItems("items", parameter.Items, refPref, "parameter") + } + if parameter.In == "body" && parameter.Schema != nil { + s.analyzeSchema("schema", *parameter.Schema, refPref) + } + if parameter.Pattern != "" { + s.patterns.addParameterPattern(refPref, parameter.Pattern) + } + if len(parameter.Enum) > 0 { + s.enums.addParameterEnum(refPref, parameter.Enum) + } + } + + for name, response := range s.spec.Responses { + refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) + for k, v := range response.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + if v.Items != nil { + s.analyzeItems("items", v.Items, hRefPref, "header") + } + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if response.Schema != nil { + s.analyzeSchema("schema", *response.Schema, refPref) + } + } + + for name, schema := range s.spec.Definitions { + s.analyzeSchema(name, schema, "/definitions") + } + // TODO: after analyzing all things and flattening schemas etc + // resolve all the collected references to their final representations + // best put in a separate method because this could get expensive +} + +func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { + // TODO: 
resolve refs here? + // Currently, operations declared via pathItem $ref are known only after expansion + op := pi + if pi.Ref.String() != "" { + key := slashpath.Join("/paths", jsonpointer.Escape(path)) + s.references.addPathItemRef(key, pi) + } + s.analyzeOperation("GET", path, op.Get) + s.analyzeOperation("PUT", path, op.Put) + s.analyzeOperation("POST", path, op.Post) + s.analyzeOperation("PATCH", path, op.Patch) + s.analyzeOperation("DELETE", path, op.Delete) + s.analyzeOperation("HEAD", path, op.Head) + s.analyzeOperation("OPTIONS", path, op.Options) + for i, param := range op.Parameters { + refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, &param) + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + if param.Items != nil { + s.analyzeItems("items", param.Items, refPref, "parameter") + } + if param.Schema != nil { + s.analyzeSchema("schema", *param.Schema, refPref) + } + } +} + +func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { + if items == nil { + return + } + refPref := slashpath.Join(prefix, name) + s.analyzeItems(name, items.Items, refPref, location) + if items.Ref.String() != "" { + s.references.addItemsRef(refPref, items, location) + } + if items.Pattern != "" { + s.patterns.addItemsPattern(refPref, items.Pattern) + } + if len(items.Enum) > 0 { + s.enums.addItemsEnum(refPref, items.Enum) + } +} + +func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { + if op == nil { + return + } + + for _, c := range op.Consumes { + s.consumes[c] = struct{}{} + } + for _, c := range op.Produces { + s.produces[c] = struct{}{} + } + for _, ss := range op.Security { + for k := range ss { + s.authSchemes[k] = struct{}{} + } + } + if _, ok := s.operations[method]; !ok { + s.operations[method] = make(map[string]*spec.Operation) + } + s.operations[method][path] = op + prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) + for i, param := range op.Parameters { + refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) + if param.Ref.String() != "" { + s.references.addParamRef(refPref, &param) + } + if param.Pattern != "" { + s.patterns.addParameterPattern(refPref, param.Pattern) + } + if len(param.Enum) > 0 { + s.enums.addParameterEnum(refPref, param.Enum) + } + s.analyzeItems("items", param.Items, refPref, "parameter") + if param.In == "body" && param.Schema != nil { + s.analyzeSchema("schema", *param.Schema, refPref) + } + } + if op.Responses != nil { + if op.Responses.Default != nil { + refPref := slashpath.Join(prefix, "responses", "default") + if op.Responses.Default.Ref.String() != "" { + s.references.addResponseRef(refPref, op.Responses.Default) + } + for k, v := range op.Responses.Default.Headers { + hRefPref := slashpath.Join(refPref, "headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + } + if op.Responses.Default.Schema != nil { + s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) + } + } + for k, res := range op.Responses.StatusCodeResponses { + refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) + if res.Ref.String() != "" { + s.references.addResponseRef(refPref, &res) + } + for k, v := range res.Headers { + hRefPref := slashpath.Join(refPref, 
"headers", k) + s.analyzeItems("items", v.Items, hRefPref, "header") + if v.Pattern != "" { + s.patterns.addHeaderPattern(hRefPref, v.Pattern) + } + if len(v.Enum) > 0 { + s.enums.addHeaderEnum(hRefPref, v.Enum) + } + } + if res.Schema != nil { + s.analyzeSchema("schema", *res.Schema, refPref) + } + } + } +} + +func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { + refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) + schRef := SchemaRef{ + Name: name, + Schema: &schema, + Ref: spec.MustCreateRef("#" + refURI), + TopLevel: prefix == "/definitions", + } + + s.allSchemas["#"+refURI] = schRef + + if schema.Ref.String() != "" { + s.references.addSchemaRef(refURI, schRef) + } + if schema.Pattern != "" { + s.patterns.addSchemaPattern(refURI, schema.Pattern) + } + if len(schema.Enum) > 0 { + s.enums.addSchemaEnum(refURI, schema.Enum) + } + + for k, v := range schema.Definitions { + s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) + } + for k, v := range schema.Properties { + s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) + } + for k, v := range schema.PatternProperties { + // NOTE: swagger 2.0 does not support PatternProperties. + // However it is possible to analyze this in a schema + s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) + } + for i, v := range schema.AllOf { + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) + } + if len(schema.AllOf) > 0 { + s.allOfs["#"+refURI] = schRef + } + for i, v := range schema.AnyOf { + // NOTE: swagger 2.0 does not support anyOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) + } + for i, v := range schema.OneOf { + // NOTE: swagger 2.0 does not support oneOf constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) + } + if schema.Not != nil { + // NOTE: swagger 2.0 does not support "not" constructs. + // However it is possible to analyze this in a schema + s.analyzeSchema("not", *schema.Not, refURI) + } + if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { + s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) + } + if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { + // NOTE: swagger 2.0 does not support AdditionalItems. 
+ // However it is possible to analyze this in a schema + s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) + } + if schema.Items != nil { + if schema.Items.Schema != nil { + s.analyzeSchema("items", *schema.Items.Schema, refURI) + } + for i, sch := range schema.Items.Schemas { + s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) + } + } +} + +// SecurityRequirement is a representation of a security requirement for an operation +type SecurityRequirement struct { + Name string + Scopes []string +} + +// SecurityRequirementsFor gets the security requirements for the operation +func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { + if s.spec.Security == nil && operation.Security == nil { + return nil + } + + schemes := s.spec.Security + if operation.Security != nil { + schemes = operation.Security + } + + result := [][]SecurityRequirement{} + for _, scheme := range schemes { + if len(scheme) == 0 { + // append a zero object for anonymous + result = append(result, []SecurityRequirement{{}}) + continue + } + var reqs []SecurityRequirement + for k, v := range scheme { + if v == nil { + v = []string{} + } + reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) + } + result = append(result, reqs) + } + return result +} + +// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { + result := make(map[string]spec.SecurityScheme) + + for _, v := range requirements { + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + return result +} + +// SecurityDefinitionsFor gets the matching security definitions for a set of requirements +func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { + requirements := s.SecurityRequirementsFor(operation) + if len(requirements) == 0 { + return nil + } + + result := make(map[string]spec.SecurityScheme) + for _, reqs := range requirements { + for _, v := range reqs { + if v.Name == "" { + // optional requirement + continue + } + if _, ok := result[v.Name]; ok { + // duplicate requirement + continue + } + if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { + if definition != nil { + result[v.Name] = *definition + } + } + } + } + return result +} + +// ConsumesFor gets the mediatypes for the operation +func (s *Spec) ConsumesFor(operation *spec.Operation) []string { + + if len(operation.Consumes) == 0 { + cons := make(map[string]struct{}, len(s.spec.Consumes)) + for _, k := range s.spec.Consumes { + cons[k] = struct{}{} + } + return s.structMapKeys(cons) + } + + cons := make(map[string]struct{}, len(operation.Consumes)) + for _, c := range operation.Consumes { + cons[c] = struct{}{} + } + return s.structMapKeys(cons) +} + +// ProducesFor gets the mediatypes for the operation +func (s *Spec) ProducesFor(operation *spec.Operation) []string { + if len(operation.Produces) == 0 { + prod := make(map[string]struct{}, len(s.spec.Produces)) + for _, k := range s.spec.Produces { + prod[k] = struct{}{} + } + return s.structMapKeys(prod) + } + + prod := make(map[string]struct{}, len(operation.Produces)) + for _, c := range operation.Produces { + prod[c] = struct{}{} + } + return s.structMapKeys(prod) +} + +func mapKeyFromParam(param *spec.Parameter) string { + return fmt.Sprintf("%s#%s", param.In, 
fieldNameFromParam(param)) +} + +func fieldNameFromParam(param *spec.Parameter) string { + // TODO: this should be x-go-name + if nm, ok := param.Extensions.GetString("go-name"); ok { + return nm + } + return swag.ToGoName(param.Name) +} + +// ErrorOnParamFunc is a callback function to be invoked +// whenever an error is encountered while resolving references +// on parameters. +// +// This function takes as input the spec.Parameter which triggered the +// error and the error itself. +// +// If the callback function returns false, the calling function should bail. +// +// If it returns true, the calling function should continue evaluating parameters. +// A nil ErrorOnParamFunc is treated as equivalent to a panic() upon error. +type ErrorOnParamFunc func(spec.Parameter, error) bool + +func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { + for _, param := range parameters { + pr := param + if pr.Ref.String() != "" { + obj, _, err := pr.Ref.GetPointer().Get(s.spec) + if err != nil { + if callmeOnError != nil { + if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { + continue + } + break + } else { + panic(fmt.Sprintf("invalid reference: %q", pr.Ref.String())) + } + } + if objAsParam, ok := obj.(spec.Parameter); ok { + pr = objAsParam + } else { + if callmeOnError != nil { + if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { + continue + } + break + } else { + panic(fmt.Sprintf("resolved reference is not a parameter: %q", pr.Ref.String())) + } + } + } + res[mapKeyFromParam(&pr)] = pr + } +} + +// ParametersFor the specified operation id. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParametersFor(operationID string) []spec.Parameter { + return s.SafeParametersFor(operationID, nil) +} + +// SafeParametersFor the specified operation id. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invokes an ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { + gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { + bag := make(map[string]spec.Parameter) + s.paramsAsMap(pi.Parameters, bag, callmeOnError) + s.paramsAsMap(op.Parameters, bag, callmeOnError) + + var res []spec.Parameter + for _, v := range bag { + res = append(res, v) + } + return res + } + for _, pi := range s.spec.Paths.Paths { + if pi.Get != nil && pi.Get.ID == operationID { + return gatherParams(&pi, pi.Get) + } + if pi.Head != nil && pi.Head.ID == operationID { + return gatherParams(&pi, pi.Head) + } + if pi.Options != nil && pi.Options.ID == operationID { + return gatherParams(&pi, pi.Options) + } + if pi.Post != nil && pi.Post.ID == operationID { + return gatherParams(&pi, pi.Post) + } + if pi.Patch != nil && pi.Patch.ID == operationID { + return gatherParams(&pi, pi.Patch) + } + if pi.Put != nil && pi.Put.ID == operationID { + return gatherParams(&pi, pi.Put) + } + if pi.Delete != nil && pi.Delete.ID == operationID { + return gatherParams(&pi, pi.Delete) + } + } + return nil +} + +// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Assumes parameters properly resolve references if any and that +// such references actually resolve to a parameter object. +// Otherwise, panics. +func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { + return s.SafeParamsFor(method, path, nil) +} + +// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that +// apply for the method and path. +// +// Does not assume parameters properly resolve references or that +// such references actually resolve to a parameter object. +// +// Upon error, invokes an ErrorOnParamFunc callback with the erroneous +// parameters. If the callback is set to nil, panics upon errors. +func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { + res := make(map[string]spec.Parameter) + if pi, ok := s.spec.Paths.Paths[path]; ok { + s.paramsAsMap(pi.Parameters, res, callmeOnError) + s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) + } + return res +} + +// OperationForName gets the operation for the given id +func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { + for method, pathItem := range s.operations { + for path, op := range pathItem { + if operationID == op.ID { + return method, path, op, true + } + } + } + return "", "", nil, false +} + +// OperationFor the given method and path +func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { + if mp, ok := s.operations[strings.ToUpper(method)]; ok { + op, fn := mp[path] + return op, fn + } + return nil, false +} + +// Operations gathers all the operations specified in the spec document +func (s *Spec) Operations() map[string]map[string]*spec.Operation { + return s.operations +} + +func (s *Spec) structMapKeys(mp map[string]struct{}) []string { + if len(mp) == 0 { + return nil + } + + result := make([]string, 0, len(mp)) + for k := range mp { + result = append(result, k) + } + return result +} + +// AllPaths returns all the paths in the swagger spec +func (s *Spec) AllPaths() map[string]spec.PathItem { + if s.spec == nil || s.spec.Paths == nil { + return nil + } + return s.spec.Paths.Paths +} + +// OperationIDs gets all the operation ids based on method and path +func (s *Spec) OperationIDs() []string { + if len(s.operations) == 0 { + return nil + } + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p, o := range v { + if o.ID != "" { + result = append(result, o.ID) + } else { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + } + return result +} + +// OperationMethodPaths gets all the operation method/path pairs +func (s *Spec) OperationMethodPaths() []string { + if len(s.operations) == 0 { + return nil + } + result := make([]string, 0, len(s.operations)) + for method, v := range s.operations { + for p := range v { + result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) + } + } + return result +} + +// RequiredConsumes gets all the distinct consumes that are specified in the specification document +func (s *Spec) RequiredConsumes() []string { + return s.structMapKeys(s.consumes) +} + +// RequiredProduces gets all the distinct produces that are specified in the specification document +func (s *Spec) RequiredProduces() []string { + return s.structMapKeys(s.produces) +} + +// 
RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec +func (s *Spec) RequiredSecuritySchemes() []string { + return s.structMapKeys(s.authSchemes) +} + +// SchemaRef is a reference to a schema +type SchemaRef struct { + Name string + Ref spec.Ref + Schema *spec.Schema + TopLevel bool +} + +// SchemasWithAllOf returns schema references to all schemas that are defined +// with an allOf key +func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { + for _, v := range s.allOfs { + result = append(result, v) + } + return +} + +// AllDefinitions returns schema references for all the definitions that were discovered +func (s *Spec) AllDefinitions() (result []SchemaRef) { + for _, v := range s.allSchemas { + result = append(result, v) + } + return +} + +// AllDefinitionReferences returns json refs for all the discovered schemas +func (s *Spec) AllDefinitionReferences() (result []string) { + for _, v := range s.references.schemas { + result = append(result, v.String()) + } + return +} + +// AllParameterReferences returns json refs for all the discovered parameters +func (s *Spec) AllParameterReferences() (result []string) { + for _, v := range s.references.parameters { + result = append(result, v.String()) + } + return +} + +// AllResponseReferences returns json refs for all the discovered responses +func (s *Spec) AllResponseReferences() (result []string) { + for _, v := range s.references.responses { + result = append(result, v.String()) + } + return +} + +// AllPathItemReferences returns the references for all the path items +func (s *Spec) AllPathItemReferences() (result []string) { + for _, v := range s.references.pathItems { + result = append(result, v.String()) + } + return +} + +// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). +// +// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid +// Swagger 2.0 spec. 
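// This makes it usable as a cheap structural probe (editor's sketch, assuming
// an is a *Spec built with New and the standard log package is imported):
//
//	if refs := an.AllItemsReferences(); len(refs) > 0 {
//		log.Printf("simple params/headers carry $ref (not allowed in Swagger 2.0): %v", refs)
//	}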
+func (s *Spec) AllItemsReferences() (result []string) { + for _, v := range s.references.items { + result = append(result, v.String()) + } + return +} + +// AllReferences returns all the references found in the document, with possible duplicates +func (s *Spec) AllReferences() (result []string) { + for _, v := range s.references.allRefs { + result = append(result, v.String()) + } + return +} + +// AllRefs returns all the unique references found in the document +func (s *Spec) AllRefs() (result []spec.Ref) { + set := make(map[string]struct{}) + for _, v := range s.references.allRefs { + a := v.String() + if a == "" { + continue + } + if _, ok := set[a]; !ok { + set[a] = struct{}{} + result = append(result, v) + } + } + return +} + +func cloneStringMap(source map[string]string) map[string]string { + res := make(map[string]string, len(source)) + for k, v := range source { + res[k] = v + } + return res +} + +func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { + res := make(map[string][]interface{}, len(source)) + for k, v := range source { + res[k] = v + } + return res +} + +// ParameterPatterns returns all the patterns found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterPatterns() map[string]string { + return cloneStringMap(s.patterns.parameters) +} + +// HeaderPatterns returns all the patterns found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderPatterns() map[string]string { + return cloneStringMap(s.patterns.headers) +} + +// ItemsPatterns returns all the patterns found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsPatterns() map[string]string { + return cloneStringMap(s.patterns.items) +} + +// SchemaPatterns returns all the patterns found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaPatterns() map[string]string { + return cloneStringMap(s.patterns.schemas) +} + +// AllPatterns returns all the patterns found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllPatterns() map[string]string { + return cloneStringMap(s.patterns.allPatterns) +} + +// ParameterEnums returns all the enums found in parameters +// the map is cloned to avoid accidental changes +func (s *Spec) ParameterEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.parameters) +} + +// HeaderEnums returns all the enums found in response headers +// the map is cloned to avoid accidental changes +func (s *Spec) HeaderEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.headers) +} + +// ItemsEnums returns all the enums found in simple array items +// the map is cloned to avoid accidental changes +func (s *Spec) ItemsEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.items) +} + +// SchemaEnums returns all the enums found in schemas +// the map is cloned to avoid accidental changes +func (s *Spec) SchemaEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.schemas) +} + +// AllEnums returns all the enums found in the spec +// the map is cloned to avoid accidental changes +func (s *Spec) AllEnums() map[string][]interface{} { + return cloneEnumMap(s.enums.allEnums) +} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml new file mode 100644 index 000000000000..3239d74416a7 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/appveyor.yml @@ -0,0 +1,33 @@ +version: "0.1.{build}" + 
+clone_folder: C:\go-openapi\analysis +shallow_clone: true # for startup speed +pull_requests: + do_not_increment_build_number: true + +#skip_tags: true +#skip_branch_with_pr: true + +# appveyor.yml +build: off + +environment: + GOPATH: c:\gopath + +stack: go 1.12 + +test_script: + - go test -v -timeout 20m ./... +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off + +notifications: + - provider: Slack + incoming_webhook: https://hooks.slack.com/services/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 + auth_token: + secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= + channel: bots + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go new file mode 100644 index 000000000000..84cc4e54cb79 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/debug.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "log" + "os" + "path/filepath" + "runtime" +) + +var ( + // Debug is true when the SWAGGER_DEBUG env var is not empty. + // It enables a more verbose logging of the spec analyzer. + Debug = os.Getenv("SWAGGER_DEBUG") != "" + // analysisLogger is a debug logger for this package + analysisLogger *log.Logger +) + +func init() { + debugOptions() +} + +func debugOptions() { + analysisLogger = log.New(os.Stdout, "analysis:", log.LstdFlags) +} + +func debugLog(msg string, args ...interface{}) { + // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() + if Debug { + _, file1, pos1, _ := runtime.Caller(1) + analysisLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go new file mode 100644 index 000000000000..d5294c0950b6 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -0,0 +1,43 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package analysis provides methods to work with a Swagger specification document from +package go-openapi/spec. + +Analyzing a specification + +An analysed specification object (type Spec) provides methods to work with a swagger definition. + +Flattening or expanding a specification + +Flattening a specification bundles all remote $ref in the main spec document. +Depending on flattening options, additional preprocessing may take place: + - full flattening: replacing all inline complex constructs by a named entry in #/definitions + - expand: replace all $ref's in the document by their expanded content + +Merging several specifications + +Mixin several specifications merges all Swagger constructs, and warns about any conflicts found. + +Fixing a specification + +Unmarshalling a specification with golang's json unmarshalling may lead to +unwanted results on present but empty fields. + +Analyzing a Swagger schema + +Swagger schemas are analyzed to determine their complexity and qualify their content. +*/ +package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go new file mode 100644 index 000000000000..bfe014ca51a8 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/fixer.go @@ -0,0 +1,76 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import "github.com/go-openapi/spec" + +// FixEmptyResponseDescriptions replaces empty ("") response +// descriptions in the input with "(empty)" to ensure that the +// resulting Swagger stays valid. The problem appears to arise +// from reading in valid specs that have an explicit response +// description of "" (valid, since response.description is required), but +// due to zero values being omitted upon re-serializing (omitempty) we +// lose them unless we stick some chars in there. 
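// A before/after illustration (editor's sketch; the inline JSON is a made-up
// minimal spec):
//
//	var sw spec.Swagger
//	_ = json.Unmarshal([]byte(`{"paths":{"/pets":{"get":{"responses":{"200":{"description":""}}}}}}`), &sw)
//	FixEmptyResponseDescriptions(&sw)
//	// sw.Paths.Paths["/pets"].Get.Responses.StatusCodeResponses[200].Description is now "(empty)"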
+func FixEmptyResponseDescriptions(s *spec.Swagger) { + if s.Paths != nil { + for _, v := range s.Paths.Paths { + if v.Get != nil { + FixEmptyDescs(v.Get.Responses) + } + if v.Put != nil { + FixEmptyDescs(v.Put.Responses) + } + if v.Post != nil { + FixEmptyDescs(v.Post.Responses) + } + if v.Delete != nil { + FixEmptyDescs(v.Delete.Responses) + } + if v.Options != nil { + FixEmptyDescs(v.Options.Responses) + } + if v.Head != nil { + FixEmptyDescs(v.Head.Responses) + } + if v.Patch != nil { + FixEmptyDescs(v.Patch.Responses) + } + } + } + for k, v := range s.Responses { + FixEmptyDesc(&v) + s.Responses[k] = v + } +} + +// FixEmptyDescs adds "(empty)" as the description for any Response in +// the given Responses object that doesn't already have one. +func FixEmptyDescs(rs *spec.Responses) { + FixEmptyDesc(rs.Default) + for k, v := range rs.StatusCodeResponses { + FixEmptyDesc(&v) + rs.StatusCodeResponses[k] = v + } +} + +// FixEmptyDesc adds "(empty)" as the description to the given +// Response object if it doesn't already have one and isn't a +// ref. No-op on nil input. +func FixEmptyDesc(rs *spec.Response) { + if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { + return + } + rs.Description = "(empty)" +} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go new file mode 100644 index 000000000000..6993e4baf8ec --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -0,0 +1,1729 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package analysis + +import ( + "fmt" + "log" + "net/http" + "net/url" + "os" + slashpath "path" + "path/filepath" + "sort" + "strings" + + "strconv" + + "github.com/go-openapi/analysis/internal" + "github.com/go-openapi/jsonpointer" + swspec "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// FlattenOpts configuration for flattening a swagger specification. +type FlattenOpts struct { + Spec *Spec // The analyzed spec to work with + flattenContext *context // Internal context to track flattening activity + + BasePath string + + // Flattening options + Expand bool // If Expand is true, we skip flattening the spec and expand it instead + Minimal bool + Verbose bool + RemoveUnused bool + + /* Extra keys */ + _ struct{} // require keys +} + +// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. 
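// For reference, a typical minimal-flatten invocation looks like this
// (editor's sketch; obtaining the *swspec.Swagger document is elided):
//
//	err := Flatten(FlattenOpts{
//		Spec:     New(doc),
//		BasePath: "swagger.json", // base used to resolve relative $ref
//		Minimal:  true,           // only rewrite $ref; leave schema constructs untouched
//	})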
+func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *swspec.ExpandOptions { + return &swspec.ExpandOptions{RelativeBase: f.BasePath, SkipSchemas: skipSchemas} +} + +// Swagger gets the swagger specification for this flatten operation +func (f *FlattenOpts) Swagger() *swspec.Swagger { + return f.Spec.spec +} + +// newRef stores information about refs created during the flattening process +type newRef struct { + key string + newName string + path string + isOAIGen bool + resolved bool + schema *swspec.Schema + parents []string +} + +// context stores intermediary results from flatten +type context struct { + newRefs map[string]*newRef + warnings []string + resolved map[string]string +} + +func newContext() *context { + return &context{ + newRefs: make(map[string]*newRef, 150), + warnings: make([]string, 0), + resolved: make(map[string]string, 50), + } +} + +// Flatten an analyzed spec and produce a self-contained spec bundle. +// +// There is a minimal and a full flattening mode. +// +// Minimally flattening a spec means: +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") +// +// A minimally flattened spec thus guarantees the following properties: +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique +// +// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they +// represent a complex schema or express commonality in the spec. +// Otherwise, they are simply expanded. +// +// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. +// +// Fully flattening a spec means: +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// +// By complex, we mean every JSON object with some properties. +// Arrays, when they do not define a tuple, +// or empty objects with or without additionalProperties, are not considered complex and remain inline. +// +// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions +// have been created. +// +// Available flattening options: +// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched +// - Expand: expand all $ref's in the document (has no effect if Minimal is set to true) +// - Verbose: croaks about name conflicts detected +// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening +// +// NOTE: expansion removes all $ref save circular $ref, which remain in place +// +// TODO: additional options +// - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set an +// x-go-name extension +// - LiftAllOfs: +// - limit the flattening of allOf members when simple objects +// - merge allOf with validation only +// - merge allOf with extensions only +// - ... 
+// +func Flatten(opts FlattenOpts) error { + // Make sure opts.BasePath is an absolute path + if !filepath.IsAbs(opts.BasePath) { + cwd, _ := os.Getwd() + opts.BasePath = filepath.Join(cwd, opts.BasePath) + } + // make sure drive letter on windows is normalized to lower case + u, _ := url.Parse(opts.BasePath) + opts.BasePath = u.String() + + opts.flattenContext = newContext() + + // recursively expand responses, parameters, path items and items in simple schemas. + // This simplifies the spec and leaves $ref only into schema objects. + if err := swspec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { + return err + } + + // strip current file from $ref's, so we can recognize them as proper definitions + // In particular, this works around for issue go-openapi/spec#76: leading absolute file in $ref is stripped + if err := normalizeRef(&opts); err != nil { + return err + } + + if opts.RemoveUnused { + // optionally removes shared parameters and responses already expanded (now unused) + // default parameters (i.e. under paths) remain. + opts.Swagger().Parameters = nil + opts.Swagger().Responses = nil + } + + opts.Spec.reload() // re-analyze + + // at this point there are no references left but in schemas + + for imported := false; !imported; { + // iteratively import remote references until none left. + // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") + var err error + if imported, err = importExternalReferences(&opts); err != nil { + return err + } + opts.Spec.reload() // re-analyze + } + + if !opts.Minimal && !opts.Expand { + // full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) + if err := nameInlinedSchemas(&opts); err != nil { + return err + } + + opts.Spec.reload() // re-analyze + } + + // rewrite JSON pointers other than $ref to named definitions + // and attempt to resolve conflicting names whenever possible. + if err := stripPointersAndOAIGen(&opts); err != nil { + return err + } + + if opts.RemoveUnused { + // remove unused definitions + expected := make(map[string]struct{}) + for k := range opts.Swagger().Definitions { + expected[slashpath.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} + } + for _, k := range opts.Spec.AllDefinitionReferences() { + if _, ok := expected[k]; ok { + delete(expected, k) + } + } + for k := range expected { + debugLog("removing unused definition %s", slashpath.Base(k)) + if opts.Verbose { + log.Printf("info: removing unused definition: %s", slashpath.Base(k)) + } + delete(opts.Swagger().Definitions, slashpath.Base(k)) + } + opts.Spec.reload() // re-analyze + } + + // TODO: simplify known schema patterns to flat objects with properties + // examples: + // - lift simple allOf object, + // - empty allOf with validation only or extensions only + // - rework allOf arrays + // - rework allOf additionalProperties + + if opts.Verbose { + // issue notifications + croak(&opts) + } + return nil +} + +// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
+// +// Complex means the schema is not any of: +// - a simple type (primitive) +// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) +// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will +// generate a definition) +func isAnalyzedAsComplex(asch *AnalyzedSchema) bool { + return !asch.IsSimpleSchema && !asch.IsArray && !asch.IsMap +} + +// nameInlinedSchemas replaces every complex inline construct by a named definition. +func nameInlinedSchemas(opts *FlattenOpts) error { + debugLog("nameInlinedSchemas") + namer := &inlineSchemaNamer{ + Spec: opts.Swagger(), + Operations: opRefsByRef(gatherOperations(opts.Spec, nil)), + flattenContext: opts.flattenContext, + opts: opts, + } + depthFirst := sortDepthFirst(opts.Spec.allSchemas) + for _, key := range depthFirst { + sch := opts.Spec.allSchemas[key] + if sch.Schema != nil && sch.Schema.Ref.String() == "" && !sch.TopLevel { // inline schema + asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if err != nil { + return fmt.Errorf("schema analysis [%s]: %v", key, err) + } + + if isAnalyzedAsComplex(asch) { // move complex schemas to definitions + if err := namer.Name(key, sch.Schema, asch); err != nil { + return err + } + } + } + } + return nil +} + +var depthGroupOrder = []string{ + "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", +} + +func sortDepthFirst(data map[string]SchemaRef) []string { + // group by category (shared params, op param, statuscode response, default response, definitions) + // sort groups internally by number of parts in the key and lexical names + // flatten groups into a single list of keys + sorted := make([]string, 0, len(data)) + grouped := make(map[string]keys, len(data)) + for k := range data { + split := keyParts(k) + var pk string + if split.IsSharedOperationParam() { + pk = "sharedOpParam" + } + if split.IsOperationParam() { + pk = "opParam" + } + if split.IsStatusCodeResponse() { + pk = "codeResponse" + } + if split.IsDefaultResponse() { + pk = "defaultResponse" + } + if split.IsDefinition() { + pk = "definition" + } + if split.IsSharedParam() { + pk = "sharedParam" + } + if split.IsSharedResponse() { + pk = "sharedResponse" + } + grouped[pk] = append(grouped[pk], key{Segments: len(split), Key: k}) + } + + for _, pk := range depthGroupOrder { + res := grouped[pk] + sort.Sort(res) + for _, v := range res { + sorted = append(sorted, v.Key) + } + } + return sorted +} + +type key struct { + Segments int + Key string +} +type keys []key + +func (k keys) Len() int { return len(k) } +func (k keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k keys) Less(i, j int) bool { + return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) +} + +type inlineSchemaNamer struct { + Spec *swspec.Swagger + Operations map[string]opRef + flattenContext *context + opts *FlattenOpts +} + +func opRefsByRef(oprefs map[string]opRef) map[string]opRef { + result := make(map[string]opRef, len(oprefs)) + for _, v := range oprefs { + result[v.Ref.String()] = v + } + return result +} + +func (isn *inlineSchemaNamer) Name(key string, schema *swspec.Schema, aschema *AnalyzedSchema) error { + debugLog("naming inlined schema at %s", key) + + parts := keyParts(key) + for _, name := range namesFromKey(parts, aschema, isn.Operations) { + if name != "" { + // 
create unique name + newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) + + // clone schema + sch, err := cloneSchema(schema) + if err != nil { + return err + } + + // replace values on schema + if err := rewriteSchemaToRef(isn.Spec, key, + swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { + return fmt.Errorf("error while creating definition %q from inline schema: %v", newName, err) + } + + // rewrite any dependent $ref pointing to this place, + // when not already pointing to a top-level definition. + // + // NOTE: this is important if such referrers use arbitrary JSON pointers. + an := New(isn.Spec) + for k, v := range an.references.allRefs { + r, _, erd := deepestRef(isn.opts, v) + if erd != nil { + return fmt.Errorf("at %s, %v", k, erd) + } + if r.String() == key || + r.String() == slashpath.Join(definitionsPath, newName) && + slashpath.Dir(v.String()) != definitionsPath { + debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) + + // rewrite $ref to the new target + if err := updateRef(isn.Spec, k, + swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { + return err + } + } + } + + // NOTE: this extension is currently not used by go-swagger (provided for information only) + sch.AddExtension("x-go-gen-location", genLocation(parts)) + + // save cloned schema to definitions + saveSchema(isn.Spec, newName, sch) + + // keep track of created refs + if isn.flattenContext != nil { + debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) + resolved := false + if _, ok := isn.flattenContext.newRefs[key]; ok { + resolved = isn.flattenContext.newRefs[key].resolved + } + isn.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: slashpath.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + } + } + return nil +} + +// genLocation indicates from which section of the specification (models or operations) a definition has been created. +// +// This is reflected in the output spec with an "x-go-gen-location" extension. At the moment, this is provided +// for information only. 
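// Concretely (editor's illustration; keys use the escaped JSON pointer syntax
// built by keyParts further down in this file):
//
//	genLocation(keyParts("#/paths/~1pets/get/responses/200/schema")) // "operations"
//	genLocation(keyParts("#/definitions/pet/properties/tags"))       // "models"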
+func genLocation(parts splitKey) string { + if parts.IsOperation() { + return "operations" + } + if parts.IsDefinition() { + return "models" + } + return "" +} + +// uniqifyName yields a unique name for a definition +func uniqifyName(definitions swspec.Definitions, name string) (string, bool) { + isOAIGen := false + if name == "" { + name = "oaiGen" + isOAIGen = true + } + if len(definitions) == 0 { + return name, isOAIGen + } + + unq := true + for k := range definitions { + if strings.ToLower(k) == strings.ToLower(name) { + unq = false + break + } + } + + if unq { + return name, isOAIGen + } + + name += "OAIGen" + isOAIGen = true + var idx int + unique := name + _, known := definitions[unique] + for known { + idx++ + unique = fmt.Sprintf("%s%d", name, idx) + _, known = definitions[unique] + } + return unique, isOAIGen +} + +func namesFromKey(parts splitKey, aschema *AnalyzedSchema, operations map[string]opRef) []string { + var baseNames [][]string + var startIndex int + if parts.IsOperation() { + // params + if parts.IsOperationParam() || parts.IsSharedOperationParam() { + piref := parts.PathItemRef() + if piref.String() != "" && parts.IsOperationParam() { + if op, ok := operations[piref.String()]; ok { + startIndex = 5 + baseNames = append(baseNames, []string{op.ID, "params", "body"}) + } + } else if parts.IsSharedOperationParam() { + pref := parts.PathRef() + for k, v := range operations { + if strings.HasPrefix(k, pref.String()) { + startIndex = 4 + baseNames = append(baseNames, []string{v.ID, "params", "body"}) + } + } + } + } + // responses + if parts.IsOperationResponse() { + piref := parts.PathItemRef() + if piref.String() != "" { + if op, ok := operations[piref.String()]; ok { + startIndex = 6 + baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) + } + } + } + } + + // definitions + if parts.IsDefinition() { + nm := parts.DefinitionName() + if nm != "" { + startIndex = 2 + baseNames = append(baseNames, []string{parts.DefinitionName()}) + } + } + + var result []string + for _, segments := range baseNames { + nm := parts.BuildName(segments, startIndex, aschema) + if nm != "" { + result = append(result, nm) + } + } + sort.Strings(result) + return result +} + +const ( + paths = "paths" + responses = "responses" + parameters = "parameters" + definitions = "definitions" + definitionsPath = "#/definitions" +) + +var ( + ignoredKeys map[string]struct{} + validMethods map[string]struct{} +) + +func init() { + ignoredKeys = map[string]struct{}{ + "schema": {}, + "properties": {}, + "not": {}, + "anyOf": {}, + "oneOf": {}, + } + + validMethods = map[string]struct{}{ + "GET": {}, + "HEAD": {}, + "OPTIONS": {}, + "PATCH": {}, + "POST": {}, + "PUT": {}, + "DELETE": {}, + } +} + +type splitKey []string + +func (s splitKey) IsDefinition() bool { + return len(s) > 1 && s[0] == definitions +} + +func (s splitKey) DefinitionName() string { + if !s.IsDefinition() { + return "" + } + return s[1] +} + +func (s splitKey) isKeyName(i int) bool { + if i <= 0 { + return false + } + count := 0 + for idx := i - 1; idx > 0; idx-- { + if s[idx] != "properties" { + break + } + count++ + } + + return count%2 != 0 +} + +func (s splitKey) BuildName(segments []string, startIndex int, aschema *AnalyzedSchema) string { + for i, part := range s[startIndex:] { + if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { + if part == "items" || part == "additionalItems" { + if aschema.IsTuple || aschema.IsTupleWithExtra { + segments = append(segments, "tuple") + } else { + 
segments = append(segments, "items") + } + if part == "additionalItems" { + segments = append(segments, part) + } + continue + } + segments = append(segments, part) + } + } + return strings.Join(segments, " ") +} + +func (s splitKey) IsOperation() bool { + return len(s) > 1 && s[0] == paths +} + +func (s splitKey) IsSharedOperationParam() bool { + return len(s) > 2 && s[0] == paths && s[2] == parameters +} + +func (s splitKey) IsSharedParam() bool { + return len(s) > 1 && s[0] == parameters +} + +func (s splitKey) IsOperationParam() bool { + return len(s) > 3 && s[0] == paths && s[3] == parameters +} + +func (s splitKey) IsOperationResponse() bool { + return len(s) > 3 && s[0] == paths && s[3] == responses +} + +func (s splitKey) IsSharedResponse() bool { + return len(s) > 1 && s[0] == responses +} + +func (s splitKey) IsDefaultResponse() bool { + return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" +} + +func (s splitKey) IsStatusCodeResponse() bool { + isInt := func() bool { + _, err := strconv.Atoi(s[4]) + return err == nil + } + return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() +} + +func (s splitKey) ResponseName() string { + if s.IsStatusCodeResponse() { + code, _ := strconv.Atoi(s[4]) + return http.StatusText(code) + } + if s.IsDefaultResponse() { + return "Default" + } + return "" +} + +func (s splitKey) PathItemRef() swspec.Ref { + if len(s) < 3 { + return swspec.Ref{} + } + pth, method := s[1], s[2] + if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { + return swspec.Ref{} + } + return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) +} + +func (s splitKey) PathRef() swspec.Ref { + if !s.IsOperation() { + return swspec.Ref{} + } + return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(s[1]))) +} + +func keyParts(key string) splitKey { + var res []string + for _, part := range strings.Split(key[1:], "/") { + if part != "" { + res = append(res, jsonpointer.Unescape(part)) + } + } + return res +} + +func rewriteSchemaToRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + debugLog("rewriting schema to ref for %s with %s", key, ref.String()) + _, value, err := getPointerFromKey(spec, key) + if err != nil { + return err + } + + switch refable := value.(type) { + case *swspec.Schema: + return rewriteParentRef(spec, key, ref) + + case swspec.Schema: + return rewriteParentRef(spec, key, ref) + + case *swspec.SchemaOrArray: + if refable.Schema != nil { + refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + } + + case *swspec.SchemaOrBool: + if refable.Schema != nil { + refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + } + default: + return fmt.Errorf("no schema with ref found at %s for %T", key, value) + } + + return nil +} + +func rewriteParentRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { + parent, entry, pvalue, err := getParentFromKey(spec, key) + if err != nil { + return err + } + + debugLog("rewriting holder for %T", pvalue) + switch container := pvalue.(type) { + case swspec.Response: + if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { + return err + } + + case *swspec.Response: + container.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.Responses: + statusCode, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + resp := 
container.StatusCodeResponses[statusCode] + resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container.StatusCodeResponses[statusCode] = resp + + case map[string]swspec.Response: + resp := container[entry] + resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[entry] = resp + + case swspec.Parameter: + if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { + return err + } + + case map[string]swspec.Parameter: + param := container[entry] + param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[entry] = param + + case []swspec.Parameter: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + param := container[idx] + param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + container[idx] = param + + case swspec.Definitions: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case map[string]swspec.Schema: + container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case []swspec.Schema: + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + case *swspec.SchemaOrArray: + // NOTE: this is necessarily an array - otherwise, the parent would be *Schema + idx, err := strconv.Atoi(entry) + if err != nil { + return fmt.Errorf("%s not a number: %v", key[1:], err) + } + container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} + + // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema + + default: + return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) + } + return nil +} + +func cloneSchema(schema *swspec.Schema) (*swspec.Schema, error) { + var sch swspec.Schema + if err := swag.FromDynamicJSON(schema, &sch); err != nil { + return nil, fmt.Errorf("cannot clone schema: %v", err) + } + return &sch, nil +} + +// importExternalReferences iteratively digs remote references and imports them into the main schema. +// +// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. +// +// This returns true when no more remote references can be found. 
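+//
+// NOTE: a caller would presumably invoke this in a loop until it reports true,
+// e.g. (an illustrative sketch only, with error handling elided):
+//
+//	done := false
+//	for !done {
+//		done, err = importExternalReferences(opts)
+//	}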
+func importExternalReferences(opts *FlattenOpts) (bool, error) { + debugLog("importExternalReferences") + + groupedRefs := reverseIndexForSchemaRefs(opts) + sortedRefStr := make([]string, 0, len(groupedRefs)) + if opts.flattenContext == nil { + opts.flattenContext = newContext() + } + + // sort $ref resolution to ensure deterministic name conflict resolution + for refStr := range groupedRefs { + sortedRefStr = append(sortedRefStr, refStr) + } + sort.Strings(sortedRefStr) + + complete := true + + for _, refStr := range sortedRefStr { + entry := groupedRefs[refStr] + if entry.Ref.HasFragmentOnly { + continue + } + complete = false + var isOAIGen bool + + newName := opts.flattenContext.resolved[refStr] + if newName != "" { + // rewrite ref with already resolved external ref (useful for cyclical refs): + // rewrite external refs to local ones + debugLog("resolving known ref [%s] to %s", refStr, newName) + for _, key := range entry.Keys { + if err := updateRef(opts.Swagger(), key, + swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { + return false, err + } + } + } else { + // resolve schemas + debugLog("resolving schema from remote $ref [%s]", refStr) + sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %v", err) + } + + // at this stage only $ref analysis matters + partialAnalyzer := &Spec{ + references: referenceAnalysis{}, + patterns: patternAnalysis{}, + enums: enumAnalysis{}, + } + partialAnalyzer.reset() + partialAnalyzer.analyzeSchema("", *sch, "/") + + // now rewrite those refs with rebase + for key, ref := range partialAnalyzer.references.allRefs { + if err := updateRef(sch, key, swspec.MustCreateRef(rebaseRef(entry.Ref.String(), ref.String()))); err != nil { + return false, fmt.Errorf("failed to rewrite ref for key %q at %s: %v", key, entry.Ref.String(), err) + } + } + + // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + debugLog("new name for [%s]: %s - with name conflict:%t", + strings.Join(entry.Keys, ", "), newName, isOAIGen) + + opts.flattenContext.resolved[refStr] = newName + + // rewrite the external refs to local ones + for _, key := range entry.Keys { + if err := updateRef(opts.Swagger(), key, + swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { + return false, err + } + + // keep track of created refs + resolved := false + if _, ok := opts.flattenContext.newRefs[key]; ok { + resolved = opts.flattenContext.newRefs[key].resolved + } + opts.flattenContext.newRefs[key] = &newRef{ + key: key, + newName: newName, + path: slashpath.Join(definitionsPath, newName), + isOAIGen: isOAIGen, + resolved: resolved, + schema: sch, + } + } + + // add the resolved schema to the definitions + saveSchema(opts.Swagger(), newName, sch) + } + } + // maintains ref index entries + for k := range opts.flattenContext.newRefs { + r := opts.flattenContext.newRefs[k] + + // update tracking with resolved schemas + if r.schema.Ref.String() != "" { + ref := swspec.MustCreateRef(r.path) + sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) + if err != nil { + return false, fmt.Errorf("could not resolve schema: %v", err) + } + r.schema = sch + } + // update tracking with renamed keys: got a cascade of refs + if r.path != k { + renamed := *r + renamed.key = r.path + 
opts.flattenContext.newRefs[renamed.path] = &renamed
+
+ // indirect ref
+ r.newName = slashpath.Base(k)
+ r.schema = swspec.RefSchema(r.path)
+ r.path = k
+ r.isOAIGen = strings.Contains(k, "OAIGen")
+ }
+ }
+
+ return complete, nil
+}
+
+type refRevIdx struct {
+ Ref swspec.Ref
+ Keys []string
+}
+
+// rebaseRef rebases a remote ref relative to a base ref.
+//
+// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here).
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func rebaseRef(baseRef string, ref string) string {
+ baseRef, _ = url.PathUnescape(baseRef)
+ ref, _ = url.PathUnescape(ref)
+ if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") {
+ return ref
+ }
+
+ parts := strings.Split(ref, "#")
+
+ baseParts := strings.Split(baseRef, "#")
+ baseURL, _ := url.Parse(baseParts[0])
+ if strings.HasPrefix(ref, "#") {
+ if baseURL.Host == "" {
+ return strings.Join([]string{baseParts[0], parts[1]}, "#")
+ }
+ return strings.Join([]string{baseParts[0], parts[1]}, "")
+ }
+
+ refURL, _ := url.Parse(parts[0])
+ if refURL.Host != "" || filepath.IsAbs(parts[0]) {
+ // not rebasing an absolute path
+ return ref
+ }
+
+ // there is a relative path
+ var basePath string
+ if baseURL.Host != "" {
+ // when there is a host, standard URI rules apply (with "/")
+ baseURL.Path = slashpath.Dir(baseURL.Path)
+ baseURL.Path = slashpath.Join(baseURL.Path, "/"+parts[0])
+ return baseURL.String()
+ }
+
+ // this is a local relative path
+ // baseParts[0] and parts[0] are local filesystem directories/files
+ basePath = filepath.Dir(baseParts[0])
+ relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0])
+ if len(parts) > 1 {
+ return strings.Join([]string{relPath, parts[1]}, "#")
+ }
+ return relPath
+}
+
+// normalizePath renders an absolute path on remote file refs
+//
+// NOTE(windows):
+// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
+// * "/" in paths may appear as escape sequences
+func normalizePath(ref swspec.Ref, opts *FlattenOpts) (normalizedPath string) {
+ uri, _ := url.PathUnescape(ref.String())
+ if ref.HasFragmentOnly || filepath.IsAbs(uri) {
+ normalizedPath = uri
+ return
+ }
+
+ refURL, _ := url.Parse(uri)
+ if refURL.Host != "" {
+ normalizedPath = uri
+ return
+ }
+
+ parts := strings.Split(uri, "#")
+ // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage
+ parts[0] = filepath.Join(filepath.Dir(opts.BasePath), parts[0])
+ normalizedPath = strings.Join(parts, "#")
+ return
+}
+
+func reverseIndexForSchemaRefs(opts *FlattenOpts) map[string]refRevIdx {
+ collected := make(map[string]refRevIdx)
+ for key, schRef := range opts.Spec.references.schemas {
+ // normalize paths before sorting,
+ // so we group together keys in the same external file
+ normalizedPath := normalizePath(schRef, opts)
+ if entry, ok := collected[normalizedPath]; ok {
+ entry.Keys = append(entry.Keys, key)
+ collected[normalizedPath] = entry
+ } else {
+ collected[normalizedPath] = refRevIdx{
+ Ref: schRef,
+ Keys: []string{key},
+ }
+ }
+ }
+ return collected
+}
+
+func nameFromRef(ref swspec.Ref) string {
+ u := ref.GetURL()
+ if u.Fragment != "" {
+ return swag.ToJSONName(slashpath.Base(u.Fragment))
+ }
+ if u.Path != "" {
+ bn := slashpath.Base(u.Path)
+ if bn != "" && bn != "/" {
+ ext := slashpath.Ext(bn)
+ if ext != "" {
+ return
swag.ToJSONName(bn[:len(bn)-len(ext)])
+ }
+ return swag.ToJSONName(bn)
+ }
+ }
+ return swag.ToJSONName(strings.Replace(u.Host, ".", " ", -1))
+}
+
+func saveSchema(spec *swspec.Swagger, name string, schema *swspec.Schema) {
+ if schema == nil {
+ return
+ }
+ if spec.Definitions == nil {
+ spec.Definitions = make(map[string]swspec.Schema, 150)
+ }
+ spec.Definitions[name] = *schema
+}
+
+// getPointerFromKey retrieves the content of the JSON pointer "key"
+func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) {
+ switch spec.(type) {
+ case *swspec.Schema:
+ case *swspec.Swagger:
+ default:
+ panic("unexpected type used in getPointerFromKey")
+ }
+ if key == "#/" {
+ return "", spec, nil
+ }
+ // unescape chars in key, e.g. "{}" from path params
+ pth, _ := internal.PathUnescape(key[1:])
+ ptr, err := jsonpointer.New(pth)
+ if err != nil {
+ return "", nil, err
+ }
+
+ value, _, err := ptr.Get(spec)
+ if err != nil {
+ debugLog("error when getting key: %s with path: %s", key, pth)
+ return "", nil, err
+ }
+ return pth, value, nil
+}
+
+// getParentFromKey retrieves the container of the JSON pointer "key"
+func getParentFromKey(spec interface{}, key string) (string, string, interface{}, error) {
+ switch spec.(type) {
+ case *swspec.Schema:
+ case *swspec.Swagger:
+ default:
+ panic("unexpected type used in getParentFromKey")
+ }
+ // unescape chars in key, e.g. "{}" from path params
+ pth, _ := internal.PathUnescape(key[1:])
+
+ parent, entry := slashpath.Dir(pth), slashpath.Base(pth)
+ debugLog("getting schema holder at: %s, with entry: %s", parent, entry)
+
+ pptr, err := jsonpointer.New(parent)
+ if err != nil {
+ return "", "", nil, err
+ }
+ pvalue, _, err := pptr.Get(spec)
+ if err != nil {
+ return "", "", nil, fmt.Errorf("can't get parent for %s: %v", parent, err)
+ }
+ return parent, entry, pvalue, nil
+}
+
+// updateRef replaces a ref with another one
+func updateRef(spec interface{}, key string, ref swspec.Ref) error {
+ switch spec.(type) {
+ case *swspec.Schema:
+ case *swspec.Swagger:
+ default:
+ panic("unexpected type used in updateRef")
+ }
+ debugLog("updating ref for %s with %s", key, ref.String())
+ pth, value, err := getPointerFromKey(spec, key)
+ if err != nil {
+ return err
+ }
+
+ switch refable := value.(type) {
+ case *swspec.Schema:
+ refable.Ref = ref
+ case *swspec.SchemaOrArray:
+ if refable.Schema != nil {
+ refable.Schema.Ref = ref
+ }
+ case *swspec.SchemaOrBool:
+ if refable.Schema != nil {
+ refable.Schema.Ref = ref
+ }
+ case swspec.Schema:
+ debugLog("rewriting holder for %T", refable)
+ _, entry, pvalue, erp := getParentFromKey(spec, key)
+ if erp != nil {
+ return erp
+ }
+ switch container := pvalue.(type) {
+ case swspec.Definitions:
+ container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
+
+ case map[string]swspec.Schema:
+ container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
+
+ case []swspec.Schema:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %v", pth, err)
+ }
+ container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
+
+ case *swspec.SchemaOrArray:
+ // NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %v", pth, err)
+ }
+ container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
+
+ // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema
+
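+ // (any other container type cannot hold a schema $ref at this point: report it below)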
+ default:
+ return fmt.Errorf("unhandled container type at %s: %T", key, value)
+ }
+
+ default:
+ return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+ }
+
+ return nil
+}
+
+// updateRefWithSchema replaces a ref with a schema (i.e. re-inlines the schema)
+func updateRefWithSchema(spec *swspec.Swagger, key string, sch *swspec.Schema) error {
+ debugLog("updating ref for %s with schema", key)
+ pth, value, err := getPointerFromKey(spec, key)
+ if err != nil {
+ return err
+ }
+
+ switch refable := value.(type) {
+ case *swspec.Schema:
+ *refable = *sch
+ case swspec.Schema:
+ _, entry, pvalue, erp := getParentFromKey(spec, key)
+ if erp != nil {
+ return erp
+ }
+ switch container := pvalue.(type) {
+ case swspec.Definitions:
+ container[entry] = *sch
+
+ case map[string]swspec.Schema:
+ container[entry] = *sch
+
+ case []swspec.Schema:
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %v", pth, err)
+ }
+ container[idx] = *sch
+
+ case *swspec.SchemaOrArray:
+ // NOTE: this is necessarily an array - otherwise, the parent would be *Schema
+ idx, err := strconv.Atoi(entry)
+ if err != nil {
+ return fmt.Errorf("%s not a number: %v", pth, err)
+ }
+ container.Schemas[idx] = *sch
+
+ // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema
+
+ default:
+ return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value)
+ }
+ case *swspec.SchemaOrArray:
+ *refable.Schema = *sch
+ // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema
+ case *swspec.SchemaOrBool:
+ *refable.Schema = *sch
+ default:
+ return fmt.Errorf("no schema with ref found at %s for %T", key, value)
+ }
+
+ return nil
+}
+
+func containsString(names []string, name string) bool {
+ for _, nm := range names {
+ if nm == name {
+ return true
+ }
+ }
+ return false
+}
+
+type opRef struct {
+ Method string
+ Path string
+ Key string
+ ID string
+ Op *swspec.Operation
+ Ref swspec.Ref
+}
+
+type opRefs []opRef
+
+func (o opRefs) Len() int { return len(o) }
+func (o opRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o opRefs) Less(i, j int) bool { return o[i].Key < o[j].Key }
+
+func gatherOperations(specDoc *Spec, operationIDs []string) map[string]opRef {
+ var oprefs opRefs
+
+ for method, pathItem := range specDoc.Operations() {
+ for pth, operation := range pathItem {
+ vv := *operation
+ oprefs = append(oprefs, opRef{
+ Key: swag.ToGoName(strings.ToLower(method) + " " + pth),
+ Method: method,
+ Path: pth,
+ ID: vv.ID,
+ Op: &vv,
+ Ref: swspec.MustCreateRef("#" + slashpath.Join("/paths", jsonpointer.Escape(pth), method)),
+ })
+ }
+ }
+
+ sort.Sort(oprefs)
+
+ operations := make(map[string]opRef)
+ for _, opr := range oprefs {
+ nm := opr.ID
+ if nm == "" {
+ nm = opr.Key
+ }
+
+ oo, found := operations[nm]
+ if found && oo.Method != opr.Method && oo.Path != opr.Path {
+ nm = opr.Key
+ }
+ if len(operationIDs) == 0 || containsString(operationIDs, opr.ID) || containsString(operationIDs, nm) {
+ opr.ID = nm
+ opr.Op.ID = nm
+ operations[nm] = opr
+ }
+ }
+ return operations
+}
+
+// stripPointersAndOAIGen removes anonymous JSON pointers from the spec and chains this with the name conflicts handler.
+// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible.
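+//
+// The flow below is: name anonymous pointers, strip OAIGen duplicates, then, for as long as
+// stripping re-introduced inline schemas or pointers, re-analyze and repeat both steps.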
+func stripPointersAndOAIGen(opts *FlattenOpts) error {
+ // name all JSON pointers to anonymous documents
+ if err := namePointers(opts); err != nil {
+ return err
+ }
+
+ // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts)
+ hasIntroducedPointerOrInline, ers := stripOAIGen(opts)
+ if ers != nil {
+ return ers
+ }
+
+ // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers
+ for hasIntroducedPointerOrInline {
+ if !opts.Minimal {
+ opts.Spec.reload() // re-analyze
+ if err := nameInlinedSchemas(opts); err != nil {
+ return err
+ }
+ }
+
+ if err := namePointers(opts); err != nil {
+ return err
+ }
+
+ // restrip
+ if hasIntroducedPointerOrInline, ers = stripOAIGen(opts); ers != nil {
+ return ers
+ }
+
+ opts.Spec.reload() // re-analyze
+ }
+ return nil
+}
+
+// stripOAIGen strips unnecessary OAIGen constructs from the spec; these are initially created to dedupe flattened definitions.
+//
+// A dedupe is deemed unnecessary whenever:
+// - the only conflict is with its (single) parent: OAIGen is merged into its parent
+// - there is a conflict with multiple parents: merge OAIGen into the first parent, then rewrite the other parents to point to
+// the first parent.
+//
+// This function returns true whenever it re-inlined a complex schema, so the caller may choose to iterate
+// pointer and name resolution again.
+func stripOAIGen(opts *FlattenOpts) (bool, error) {
+ debugLog("stripOAIGen")
+ replacedWithComplex := false
+
+ // figure out referrers of OAIGen definitions
+ for _, r := range opts.flattenContext.newRefs {
+ if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping)
+ continue
+ }
+ for k, v := range opts.Spec.references.allRefs {
+ if r.path != v.String() {
+ continue
+ }
+ found := false
+ for _, p := range r.parents {
+ if p == k {
+ found = true
+ break
+ }
+ }
+ if !found {
+ r.parents = append(r.parents, k)
+ }
+ }
+ }
+
+ for k := range opts.flattenContext.newRefs {
+ r := opts.flattenContext.newRefs[k]
+ //debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s",
+ // k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String())
+ if r.isOAIGen && len(r.parents) >= 1 /*&& r.schema.Ref.String() == "" */ {
+ pr := r.parents
+ sort.Strings(pr)
+
+ // rewrite first parent schema in lexicographical order
+ debugLog("rewrite first parent %s with schema", pr[0])
+ if err := updateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil {
+ return false, err
+ }
+ if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen {
+ // update parent in ref index entry
+ debugLog("update parent entry: %s", pr[0])
+ pa.schema = r.schema
+ pa.resolved = false
+ replacedWithComplex = true
+ }
+
+ // rewrite other parents to point to first parent
+ if len(pr) > 1 {
+ for _, p := range pr[1:] {
+ replacingRef := swspec.MustCreateRef(pr[0])
+
+ // set complex when replacing ref is an anonymous jsonpointer: further processing may be required
+ replacedWithComplex = replacedWithComplex ||
+ slashpath.Dir(replacingRef.String()) != definitionsPath
+ debugLog("rewrite parent with ref: %s", replacingRef.String())
+
+ // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places).
+ // Those are stripped later on.
+ if err := updateRef(opts.Swagger(), p, replacingRef); err != nil {
+ return false, err
+ }
+
+ if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen {
+ // update parent in ref index
+ debugLog("update parent entry: %s", p)
+ pa.schema = r.schema
+ pa.resolved = false
+ replacedWithComplex = true
+ }
+ }
+ }
+
+ // remove OAIGen definition
+ debugLog("removing definition %s", slashpath.Base(r.path))
+ delete(opts.Swagger().Definitions, slashpath.Base(r.path))
+
+ // propagate changes in ref index for keys which have this one as a parent
+ for kk, value := range opts.flattenContext.newRefs {
+ if kk == k || !value.isOAIGen || value.resolved {
+ continue
+ }
+ found := false
+ newParents := make([]string, 0, len(value.parents))
+ for _, parent := range value.parents {
+ if parent == r.path {
+ found = true
+ parent = pr[0]
+ }
+ newParents = append(newParents, parent)
+ }
+ if found {
+ value.parents = newParents
+ }
+ }
+
+ // mark naming conflict as resolved
+ debugLog("marking naming conflict resolved for key: %s", r.key)
+ opts.flattenContext.newRefs[r.key].isOAIGen = false
+ opts.flattenContext.newRefs[r.key].resolved = true
+
+ // determine if the previous substitution did inline a complex schema
+ if r.schema != nil && r.schema.Ref.String() == "" { // inline schema
+ asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath})
+ if err != nil {
+ return false, err
+ }
+ debugLog("re-inline schema: parent: %s, %t", pr[0], isAnalyzedAsComplex(asch))
+ replacedWithComplex = replacedWithComplex ||
+ !(slashpath.Dir(pr[0]) == definitionsPath) && isAnalyzedAsComplex(asch)
+ }
+ }
+ }
+
+ debugLog("replacedWithComplex: %t", replacedWithComplex)
+ opts.Spec.reload() // re-analyze
+ return replacedWithComplex, nil
+}
+
+// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting
+// from flattening a spec
+func croak(opts *FlattenOpts) {
+ reported := make(map[string]bool, len(opts.flattenContext.newRefs))
+ for _, v := range opts.Spec.references.allRefs {
+ // warns about duplicate handling
+ for _, r := range opts.flattenContext.newRefs {
+ if r.isOAIGen && r.path == v.String() {
+ reported[r.newName] = true
+ }
+ }
+ }
+ for k := range reported {
+ log.Printf("warning: duplicate flattened definition name resolved as %s", k)
+ }
+ // warns about possible type mismatches
+ uniqueMsg := make(map[string]bool)
+ for _, msg := range opts.flattenContext.warnings {
+ if _, ok := uniqueMsg[msg]; ok {
+ continue
+ }
+ log.Printf("warning: %s", msg)
+ uniqueMsg[msg] = true
+ }
+}
+
+// namePointers replaces all JSON pointers to anonymous documents with a $ref to new named definitions.
+//
+// This is carried out depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself.
+// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used).
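+//
+// For example (illustrative only): a pointer such as "#/paths/~1pets/get/responses/200/schema"
+// may end up as a named entry under "#/definitions", with the original pointer rewritten to that new $ref.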
+func namePointers(opts *FlattenOpts) error {
+ debugLog("name pointers")
+ refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
+ for k, ref := range opts.Spec.references.allRefs {
+ if slashpath.Dir(ref.String()) == definitionsPath {
+ // this is a ref to a top-level definition: ok
+ continue
+ }
+ replacingRef, sch, erd := deepestRef(opts, ref)
+ if erd != nil {
+ return fmt.Errorf("at %s, %v", k, erd)
+ }
+ debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String())
+ refsToReplace[k] = SchemaRef{
+ Name: k, // caller
+ Ref: replacingRef, // callee
+ Schema: sch,
+ TopLevel: slashpath.Dir(replacingRef.String()) == definitionsPath,
+ }
+ }
+ depthFirst := sortDepthFirst(refsToReplace)
+ namer := &inlineSchemaNamer{
+ Spec: opts.Swagger(),
+ Operations: opRefsByRef(gatherOperations(opts.Spec, nil)),
+ flattenContext: opts.flattenContext,
+ opts: opts,
+ }
+
+ for _, key := range depthFirst {
+ v := refsToReplace[key]
+ // update current replacement, which may have been updated by previous changes of deeper elements
+ replacingRef, sch, erd := deepestRef(opts, v.Ref)
+ if erd != nil {
+ return fmt.Errorf("at %s, %v", key, erd)
+ }
+ v.Ref = replacingRef
+ v.Schema = sch
+ v.TopLevel = slashpath.Dir(replacingRef.String()) == definitionsPath
+ debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String())
+
+ if v.TopLevel {
+ debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String())
+ // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref
+ if err := updateRef(opts.Swagger(), key, v.Ref); err != nil {
+ return err
+ }
+ } else {
+ // this is a JSON pointer to an anonymous document (internal or external):
+ // create a definition for this schema when:
+ // - it is a complex schema
+ // - or it is pointed to by more than one $ref (i.e.
expresses commonality) + // otherwise, expand the pointer (single reference to a simple type) + // + // The named definition for this follows the target's key, not the caller's + debugLog("namePointers at %s for %s", key, v.Ref.String()) + + // qualify the expanded schema + /* + if key == "#/paths/~1some~1where~1{id}/get/parameters/1/items" { + // DEBUG + //func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) { + k, res, err := getPointerFromKey(namer.Spec, key) + debugLog("k = %s, res=%#v, err=%v", k, res, err) + } + */ + asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) + if ers != nil { + return fmt.Errorf("schema analysis [%s]: %v", key, ers) + } + callers := make([]string, 0, 64) + + debugLog("looking for callers") + an := New(opts.Swagger()) + for k, w := range an.references.allRefs { + r, _, erd := deepestRef(opts, w) + if erd != nil { + return fmt.Errorf("at %s, %v", key, erd) + } + if r.String() == v.Ref.String() { + callers = append(callers, k) + } + } + debugLog("callers for %s: %d", v.Ref.String(), len(callers)) + if len(callers) == 0 { + // has already been updated and resolved + continue + } + + parts := keyParts(v.Ref.String()) + debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) + // identifying edge case when the namer did nothing because we point to a non-schema object + // no definition is created and we expand the $ref for all callers + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { + debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) + if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { + return err + } + + // regular case: we named the $ref as a definition, and we move all callers to this new $ref + for _, caller := range callers { + if caller != key { + // move $ref for next to resolve + debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) + c := refsToReplace[caller] + c.Ref = v.Ref + refsToReplace[caller] = c + } + } + } else { + debugLog("expand JSON pointer for key=%s", key) + if err := updateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { + return err + } + // NOTE: there is no other caller to update + } + } + } + opts.Spec.reload() // re-analyze + return nil +} + +// deepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. +// - if no definition is found, returns the deepest ref. +// - pointers to external files are expanded +// +// NOTE: all external $ref's are assumed to be already expanded at this stage. 
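+//
+// Example (illustrative only): a chain "#/responses/foo" -> "#/responses/bar" -> "#/definitions/baz"
+// stops at "#/definitions/baz"; a chain that never reaches "#/definitions" yields its deepest
+// resolvable ref and the schema found there.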
+func deepestRef(opts *FlattenOpts, ref swspec.Ref) (swspec.Ref, *swspec.Schema, error) {
+ if !ref.HasFragmentOnly {
+ // we found an external $ref, which is odd
+ // does nothing on external $refs
+ return ref, nil, nil
+ }
+ currentRef := ref
+ visited := make(map[string]bool, 64)
+DOWNREF:
+ for currentRef.String() != "" {
+ if slashpath.Dir(currentRef.String()) == definitionsPath {
+ // this is a top-level definition: stop here and return this ref
+ return currentRef, nil, nil
+ }
+ if _, beenThere := visited[currentRef.String()]; beenThere {
+ return swspec.Ref{}, nil,
+ fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String())
+ }
+ visited[currentRef.String()] = true
+ value, _, err := currentRef.GetPointer().Get(opts.Swagger())
+ if err != nil {
+ return swspec.Ref{}, nil, err
+ }
+ switch refable := value.(type) {
+ case *swspec.Schema:
+ if refable.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Ref
+
+ case swspec.Schema:
+ if refable.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Ref
+
+ case *swspec.SchemaOrArray:
+ if refable.Schema == nil || refable.Schema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Schema.Ref
+
+ case *swspec.SchemaOrBool:
+ if refable.Schema == nil || refable.Schema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = refable.Schema.Ref
+
+ case swspec.Response:
+ // a pointer points to a schema initially marshalled in responses section...
+ // Attempt to convert this to a schema. If this fails, the spec is invalid
+ asJSON, _ := refable.MarshalJSON()
+ var asSchema swspec.Schema
+ err := asSchema.UnmarshalJSON(asJSON)
+ if err != nil {
+ return swspec.Ref{}, nil,
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T",
+ currentRef.String(), value)
+ }
+ opts.flattenContext.warnings = append(opts.flattenContext.warnings,
+ fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+
+ case swspec.Parameter:
+ // a pointer points to a schema initially marshalled in parameters section...
+ // Attempt to convert this to a schema. If this fails, the spec is invalid
+ asJSON, _ := refable.MarshalJSON()
+ var asSchema swspec.Schema
+ err := asSchema.UnmarshalJSON(asJSON)
+ if err != nil {
+ return swspec.Ref{}, nil,
+ fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T",
+ currentRef.String(), value)
+ }
+ opts.flattenContext.warnings = append(opts.flattenContext.warnings,
+ fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
+
+ if asSchema.Ref.String() == "" {
+ break DOWNREF
+ }
+ currentRef = asSchema.Ref
+
+ default:
+ return swspec.Ref{}, nil,
+ fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
+ currentRef.String(), value)
+ }
+ }
+ // assess what schema we're ending with
+ sch, erv := swspec.ResolveRefWithBase(opts.Swagger(), &currentRef, opts.ExpandOpts(false))
+ if erv != nil {
+ return swspec.Ref{}, nil, erv
+ }
+ if sch == nil {
+ return swspec.Ref{}, nil, fmt.Errorf("no schema found at %s", currentRef.String())
+ }
+ return currentRef, sch, nil
+}
+
+// normalizeRef strips the current file from any $ref.
This works around issue go-openapi/spec#76: +// leading absolute file in $ref is stripped +func normalizeRef(opts *FlattenOpts) error { + debugLog("normalizeRef") + opts.Spec.reload() // re-analyze + for k, w := range opts.Spec.references.allRefs { + if strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS + // strip base path from definition + debugLog("stripping absolute path for: %s", w.String()) + if err := updateRef(opts.Swagger(), k, + swspec.MustCreateRef(slashpath.Join(definitionsPath, slashpath.Base(w.String())))); err != nil { + return err + } + } + } + opts.Spec.reload() // re-analyze + return nil +} diff --git a/vendor/github.com/go-openapi/analysis/go.mod b/vendor/github.com/go-openapi/analysis/go.mod new file mode 100644 index 000000000000..95652cbe3965 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.mod @@ -0,0 +1,13 @@ +module github.com/go-openapi/analysis + +require ( + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 // indirect + github.com/go-openapi/errors v0.19.2 // indirect + github.com/go-openapi/jsonpointer v0.19.2 + github.com/go-openapi/loads v0.19.0 + github.com/go-openapi/spec v0.19.2 + github.com/go-openapi/strfmt v0.19.0 + github.com/go-openapi/swag v0.19.2 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/go-openapi/analysis/go.sum b/vendor/github.com/go-openapi/analysis/go.sum new file mode 100644 index 000000000000..5a8c0dbeee23 --- /dev/null +++ b/vendor/github.com/go-openapi/analysis/go.sum @@ -0,0 +1,79 @@ +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= 
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.19.0 h1:wCOBNscACI8L93tt5tvB2zOMkJ098XCw3fP0BY2ybDA= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/analysis/internal/post_go18.go b/vendor/github.com/go-openapi/analysis/internal/post_go18.go
new file mode 100644
index 000000000000..f96f55c08733
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/post_go18.go
@@ -0,0 +1,29 @@
+// +build go1.8
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "net/url"
+
+// PathUnescape provides url.PathUnescape(), with seamless
+// go version support for pre-go1.8
+//
+// TODO: this function is currently defined in go-openapi/swag,
+// but unexported. We might choose to export it, or simply phase
+// out pre-go1.8 support.
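+//
+// NOTE: the pre-go1.8 fallback (see pre_go18.go) delegates to url.QueryUnescape, which
+// additionally decodes "+" as a space, so round-trips of paths containing "+" may differ
+// on pre-go1.8 toolchains.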
+func PathUnescape(path string) (string, error) {
+ return url.PathUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/analysis/internal/pre_go18.go b/vendor/github.com/go-openapi/analysis/internal/pre_go18.go
new file mode 100644
index 000000000000..4cc644182209
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/internal/pre_go18.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "net/url"
+
+// PathUnescape provides url.PathUnescape(), with seamless
+// go version support for pre-go1.8
+//
+// TODO: this function is currently defined in go-openapi/swag,
+// but unexported. We might choose to export it, or simply phase
+// out pre-go1.8 support.
+func PathUnescape(path string) (string, error) {
+ return url.QueryUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
new file mode 100644
index 000000000000..625c46f8f9fb
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/mixin.go
@@ -0,0 +1,425 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package analysis
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/go-openapi/spec"
+)
+
+// Mixin modifies the primary swagger spec by adding the paths and
+// definitions from the mixin specs. Top level parameters and
+// responses from the mixins are also carried over. Operation id
+// collisions are avoided by appending "Mixin" but only if
+// needed.
+//
+// The following parts of primary are subject to merge, filling empty details
+// - Info
+// - BasePath
+// - Host
+// - ExternalDocs
+//
+// Consider calling FixEmptyResponseDescriptions() on the modified primary
+// if you read them from storage and they are valid to start with.
+//
+// Entries in "paths", "definitions", "parameters" and "responses" are
+// added to the primary in the order of the given mixins. If the entry
+// already exists in primary it is skipped with a warning message.
+//
+// The count of skipped entries (from collisions) is returned so any
+// deviation from the number expected can flag a warning in your build
+// scripts. Carefully review the collisions before accepting them;
+// consider renaming things if possible.
+//
+// No key normalization takes place (paths, type defs,
+// etc). Ensure they are canonical if your downstream tools do
+// key normalization of any form.
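+//
+// A minimal usage sketch (illustrative only, with warning handling elided):
+//
+//	skipped := analysis.Mixin(primary, mixinA, mixinB)
+//	for _, warn := range skipped {
+//		log.Println(warn)
+//	}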
+//
+// Merging schemes (http, https) and consumers/producers does not account for
+// collisions.
+func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
+ skipped := make([]string, 0, len(mixins))
+ opIds := getOpIds(primary)
+ initPrimary(primary)
+
+ for i, m := range mixins {
+ skipped = append(skipped, mergeSwaggerProps(primary, m)...)
+
+ skipped = append(skipped, mergeConsumes(primary, m)...)
+
+ skipped = append(skipped, mergeProduces(primary, m)...)
+
+ skipped = append(skipped, mergeTags(primary, m)...)
+
+ skipped = append(skipped, mergeSchemes(primary, m)...)
+
+ skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
+
+ skipped = append(skipped, mergeSecurityRequirements(primary, m)...)
+
+ skipped = append(skipped, mergeDefinitions(primary, m)...)
+
+ // merging paths requires a map of operationIDs to work with
+ skipped = append(skipped, mergePaths(primary, m, opIds, i)...)
+
+ skipped = append(skipped, mergeParameters(primary, m)...)
+
+ skipped = append(skipped, mergeResponses(primary, m)...)
+ }
+ return skipped
+}
+
+// getOpIds extracts all the paths.<path>.operationIds from the given
+// spec and returns them as the keys in a map with 'true' values.
+func getOpIds(s *spec.Swagger) map[string]bool {
+ rv := make(map[string]bool)
+ if s.Paths == nil {
+ return rv
+ }
+ for _, v := range s.Paths.Paths {
+ piops := pathItemOps(v)
+ for _, op := range piops {
+ rv[op.ID] = true
+ }
+ }
+ return rv
+}
+
+func pathItemOps(p spec.PathItem) []*spec.Operation {
+ var rv []*spec.Operation
+ rv = appendOp(rv, p.Get)
+ rv = appendOp(rv, p.Put)
+ rv = appendOp(rv, p.Post)
+ rv = appendOp(rv, p.Delete)
+ rv = appendOp(rv, p.Head)
+ rv = appendOp(rv, p.Patch)
+ return rv
+}
+
+func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
+ if op == nil {
+ return ops
+ }
+ return append(ops, op)
+}
+
+func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.SecurityDefinitions {
+ if _, exists := primary.SecurityDefinitions[k]; exists {
+ warn := fmt.Sprintf(
+ "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+ continue
+ }
+ primary.SecurityDefinitions[k] = v
+ }
+ return
+}
+
+func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for _, v := range m.Security {
+ found := false
+ for _, vv := range primary.Security {
+ if reflect.DeepEqual(v, vv) {
+ found = true
+ break
+ }
+ }
+ if found {
+ warn := fmt.Sprintf(
+ "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
+ skipped = append(skipped, warn)
+ continue
+ }
+ primary.Security = append(primary.Security, v)
+ }
+ return
+}
+
+func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.Definitions {
+ // assume name collisions represent IDENTICAL types. Careful.
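+ // (on collision, the primary's definition wins: the mixin's version is skipped with a warning below)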
+ if _, exists := primary.Definitions[k]; exists {
+ warn := fmt.Sprintf(
+ "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+ continue
+ }
+ primary.Definitions[k] = v
+ }
+ return
+}
+
+func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) {
+ if m.Paths != nil {
+ for k, v := range m.Paths.Paths {
+ if _, exists := primary.Paths.Paths[k]; exists {
+ warn := fmt.Sprintf(
+ "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+ continue
+ }
+
+ // Swagger requires that operationIds be
+ // unique within a spec. If we find a
+ // collision we append "Mixin0" to the
+ // operationId we are adding, where 0 is mixin
+ // index. We assume that operationIds within
+ // all the provided specs are already unique.
+ piops := pathItemOps(v)
+ for _, piop := range piops {
+ if opIds[piop.ID] {
+ piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
+ }
+ opIds[piop.ID] = true
+ }
+ primary.Paths.Paths[k] = v
+ }
+ }
+ return
+}
+
+func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.Parameters {
+ // could try to rename on conflict but would
+ // have to fix $refs in the mixin. Complain
+ // for now
+ if _, exists := primary.Parameters[k]; exists {
+ warn := fmt.Sprintf(
+ "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
+ skipped = append(skipped, warn)
+ continue
+ }
+ primary.Parameters[k] = v
+ }
+ return
+}
+
+func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
+ for k, v := range m.Responses {
+ // could try to rename on conflict but would
+ // have to fix $refs in the mixin.
Complain + // for now + if _, exists := primary.Responses[k]; exists { + warn := fmt.Sprintf( + "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) + skipped = append(skipped, warn) + continue + } + primary.Responses[k] = v + } + return skipped +} + +func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Consumes { + found := false + for _, vv := range primary.Consumes { + if v == vv { + found = true + break + } + } + if found { + // no warning here: we just skip it + continue + } + primary.Consumes = append(primary.Consumes, v) + } + return []string{} +} + +func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Produces { + found := false + for _, vv := range primary.Produces { + if v == vv { + found = true + break + } + } + if found { + // no warning here: we just skip it + continue + } + primary.Produces = append(primary.Produces, v) + } + return []string{} +} + +func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { + for _, v := range m.Tags { + found := false + for _, vv := range primary.Tags { + if v.Name == vv.Name { + found = true + break + } + } + if found { + warn := fmt.Sprintf( + "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", v.Name) + skipped = append(skipped, warn) + continue + } + primary.Tags = append(primary.Tags, v) + } + return +} + +func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { + for _, v := range m.Schemes { + found := false + for _, vv := range primary.Schemes { + if v == vv { + found = true + break + } + } + if found { + // no warning here: we just skip it + continue + } + primary.Schemes = append(primary.Schemes, v) + } + return []string{} +} + +func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { + var skipped []string + primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) + + // merging details in swagger top properties + if primary.Host == "" { + primary.Host = m.Host + } + if primary.BasePath == "" { + primary.BasePath = m.BasePath + } + if primary.Info == nil { + primary.Info = m.Info + } else if m.Info != nil { + var sk []string + primary.Info.Extensions, sk = mergeExtensions(primary.Info.Extensions, m.Info.Extensions) + skipped = append(skipped, sk...) 
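+
+ // fill in the remaining empty Info fields from the mixin, field by field
+ // (description, title, terms of service, version, contact, license)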
+ if primary.Info.Description == "" {
+ primary.Info.Description = m.Info.Description
+ }
+ if primary.Info.Title == "" {
+ primary.Info.Title = m.Info.Title
+ }
+ if primary.Info.TermsOfService == "" {
+ primary.Info.TermsOfService = m.Info.TermsOfService
+ }
+ if primary.Info.Version == "" {
+ primary.Info.Version = m.Info.Version
+ }
+
+ if primary.Info.Contact == nil {
+ primary.Info.Contact = m.Info.Contact
+ } else if m.Info.Contact != nil {
+ if primary.Info.Contact.Name == "" {
+ primary.Info.Contact.Name = m.Info.Contact.Name
+ }
+ if primary.Info.Contact.URL == "" {
+ primary.Info.Contact.URL = m.Info.Contact.URL
+ }
+ if primary.Info.Contact.Email == "" {
+ primary.Info.Contact.Email = m.Info.Contact.Email
+ }
+ }
+
+ if primary.Info.License == nil {
+ primary.Info.License = m.Info.License
+ } else if m.Info.License != nil {
+ if primary.Info.License.Name == "" {
+ primary.Info.License.Name = m.Info.License.Name
+ }
+ if primary.Info.License.URL == "" {
+ primary.Info.License.URL = m.Info.License.URL
+ }
+ }
+ }
+ if primary.ExternalDocs == nil {
+ primary.ExternalDocs = m.ExternalDocs
+ } else if m.ExternalDocs != nil {
+ if primary.ExternalDocs.Description == "" {
+ primary.ExternalDocs.Description = m.ExternalDocs.Description
+ }
+ if primary.ExternalDocs.URL == "" {
+ primary.ExternalDocs.URL = m.ExternalDocs.URL
+ }
+ }
+ return skipped
+}
+
+func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
+ if primary == nil {
+ result = m
+ return
+ }
+ if m == nil {
+ result = primary
+ return
+ }
+ result = primary
+ for k, v := range m {
+ if _, found := primary[k]; found {
+ skipped = append(skipped, k)
+ continue
+ }
+ primary[k] = v
+ }
+ return
+}
+
+func initPrimary(primary *spec.Swagger) {
+ if primary.SecurityDefinitions == nil {
+ primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
+ }
+ if primary.Security == nil {
+ primary.Security = make([]map[string][]string, 0, 10)
+ }
+ if primary.Produces == nil {
+ primary.Produces = make([]string, 0, 10)
+ }
+ if primary.Consumes == nil {
+ primary.Consumes = make([]string, 0, 10)
+ }
+ if primary.Tags == nil {
+ primary.Tags = make([]spec.Tag, 0, 10)
+ }
+ if primary.Schemes == nil {
+ primary.Schemes = make([]string, 0, 10)
+ }
+ if primary.Paths == nil {
+ primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
+ }
+ if primary.Paths.Paths == nil {
+ primary.Paths.Paths = make(map[string]spec.PathItem)
+ }
+ if primary.Definitions == nil {
+ primary.Definitions = make(spec.Definitions)
+ }
+ if primary.Parameters == nil {
+ primary.Parameters = make(map[string]spec.Parameter)
+ }
+ if primary.Responses == nil {
+ primary.Responses = make(map[string]spec.Response)
+ }
+}
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
new file mode 100644
index 000000000000..398c7806394d
--- /dev/null
+++ b/vendor/github.com/go-openapi/analysis/schema.go
@@ -0,0 +1,234 @@
+package analysis
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/spec"
+ "github.com/go-openapi/strfmt"
+)
+
+// SchemaOpts configures the schema analyzer
+type SchemaOpts struct {
+ Schema *spec.Schema
+ Root interface{}
+ BasePath string
+ _ struct{}
+}
+
+// Schema analyzes a schema and classifies it according to known
+// patterns.
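+//
+// A minimal usage sketch (illustrative only):
+//
+//	a, err := analysis.Schema(analysis.SchemaOpts{Schema: sch, Root: doc, BasePath: base})
+//	if err == nil && a.IsSimpleSchema {
+//		// e.g. safe to inline rather than name
+//	}
+//
+// where sch, doc and base are placeholders for the caller's schema, root document and base path.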
+func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
+	if opts.Schema == nil {
+		return nil, fmt.Errorf("no schema to analyze")
+	}
+
+	a := &AnalyzedSchema{
+		schema:   opts.Schema,
+		root:     opts.Root,
+		basePath: opts.BasePath,
+	}
+
+	a.initializeFlags()
+	a.inferKnownType()
+	a.inferEnum()
+	a.inferBaseType()
+
+	if err := a.inferMap(); err != nil {
+		return nil, err
+	}
+	if err := a.inferArray(); err != nil {
+		return nil, err
+	}
+
+	a.inferTuple()
+
+	if err := a.inferFromRef(); err != nil {
+		return nil, err
+	}
+
+	a.inferSimpleSchema()
+	return a, nil
+}
+
+// AnalyzedSchema indicates what the schema represents
+type AnalyzedSchema struct {
+	schema   *spec.Schema
+	root     interface{}
+	basePath string
+
+	hasProps           bool
+	hasAllOf           bool
+	hasItems           bool
+	hasAdditionalProps bool
+	hasAdditionalItems bool
+	hasRef             bool
+
+	IsKnownType      bool
+	IsSimpleSchema   bool
+	IsArray          bool
+	IsSimpleArray    bool
+	IsMap            bool
+	IsSimpleMap      bool
+	IsExtendedObject bool
+	IsTuple          bool
+	IsTupleWithExtra bool
+	IsBaseType       bool
+	IsEnum           bool
+}
+
+// inherits copies value fields from other onto this schema
+func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) {
+	if other == nil {
+		return
+	}
+	a.hasProps = other.hasProps
+	a.hasAllOf = other.hasAllOf
+	a.hasItems = other.hasItems
+	a.hasAdditionalItems = other.hasAdditionalItems
+	a.hasAdditionalProps = other.hasAdditionalProps
+	a.hasRef = other.hasRef
+
+	a.IsKnownType = other.IsKnownType
+	a.IsSimpleSchema = other.IsSimpleSchema
+	a.IsArray = other.IsArray
+	a.IsSimpleArray = other.IsSimpleArray
+	a.IsMap = other.IsMap
+	a.IsSimpleMap = other.IsSimpleMap
+	a.IsExtendedObject = other.IsExtendedObject
+	a.IsTuple = other.IsTuple
+	a.IsTupleWithExtra = other.IsTupleWithExtra
+	a.IsBaseType = other.IsBaseType
+	a.IsEnum = other.IsEnum
+}
+
+func (a *AnalyzedSchema) inferFromRef() error {
+	if a.hasRef {
+		sch := new(spec.Schema)
+		sch.Ref = a.schema.Ref
+		err := spec.ExpandSchema(sch, a.root, nil)
+		if err != nil {
+			return err
+		}
+		rsch, err := Schema(SchemaOpts{
+			Schema:   sch,
+			Root:     a.root,
+			BasePath: a.basePath,
+		})
+		if err != nil {
+			// NOTE(fredbi): currently the only cause for errors is an
+			// unresolved ref. Since spec.ExpandSchema() expands the
+			// schema recursively, this branch is unreachable until more
+			// error causes are added to this schema analysis.
+			return err
+		}
+		a.inherits(rsch)
+	}
+	return nil
+}
+
+func (a *AnalyzedSchema) inferSimpleSchema() {
+	a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap
+}
+
+func (a *AnalyzedSchema) inferKnownType() {
+	tpe := a.schema.Type
+	format := a.schema.Format
+	a.IsKnownType = tpe.Contains("boolean") ||
+		tpe.Contains("integer") ||
+		tpe.Contains("number") ||
+		tpe.Contains("string") ||
+		(format != "" && strfmt.Default.ContainsName(format)) ||
+		(a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
+}
+
+func (a *AnalyzedSchema) inferMap() error {
+	if a.isObjectType() {
+		hasExtra := a.hasProps || a.hasAllOf
+		a.IsMap = a.hasAdditionalProps && !hasExtra
+		a.IsExtendedObject = a.hasAdditionalProps && hasExtra
+		if a.IsMap {
+			if a.schema.AdditionalProperties.Schema != nil {
+				msch, err := Schema(SchemaOpts{
+					Schema:   a.schema.AdditionalProperties.Schema,
+					Root:     a.root,
+					BasePath: a.basePath,
+				})
+				if err != nil {
+					return err
+				}
+				a.IsSimpleMap = msch.IsSimpleSchema
+			} else if a.schema.AdditionalProperties.Allows {
+				a.IsSimpleMap = true
+			}
+		}
+	}
+	return nil
+}
+
+func (a *AnalyzedSchema) inferArray() error {
+	// an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple
+	// (yes, even if the Items array contains only one element).
+	// arrays in JSON schema may be unrestricted (i.e. no Items specified).
+	// Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays.
+	//
+	// NOTE: the spec package misses the distinction between:
+	// items: [] and items: {}, so we consider both arrays here.
+	a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil)
+	if a.IsArray && a.hasItems {
+		if a.schema.Items.Schema != nil {
+			itsch, err := Schema(SchemaOpts{
+				Schema:   a.schema.Items.Schema,
+				Root:     a.root,
+				BasePath: a.basePath,
+			})
+			if err != nil {
+				return err
+			}
+			a.IsSimpleArray = itsch.IsSimpleSchema
+		}
+	}
+	if a.IsArray && !a.hasItems {
+		a.IsSimpleArray = true
+	}
+	return nil
+}
+
+func (a *AnalyzedSchema) inferTuple() {
+	tuple := a.hasItems && a.schema.Items.Schemas != nil
+	a.IsTuple = tuple && !a.hasAdditionalItems
+	a.IsTupleWithExtra = tuple && a.hasAdditionalItems
+}
+
+func (a *AnalyzedSchema) inferBaseType() {
+	if a.isObjectType() {
+		a.IsBaseType = a.schema.Discriminator != ""
+	}
+}
+
+func (a *AnalyzedSchema) inferEnum() {
+	a.IsEnum = len(a.schema.Enum) > 0
+}
+
+func (a *AnalyzedSchema) initializeFlags() {
+	a.hasProps = len(a.schema.Properties) > 0
+	a.hasAllOf = len(a.schema.AllOf) > 0
+	a.hasRef = a.schema.Ref.String() != ""
+
+	a.hasItems = a.schema.Items != nil &&
+		(a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
+
+	a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
+		(a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows)
+
+	a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
+		(a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
+}
+
+func (a *AnalyzedSchema) isObjectType() bool {
+	return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
+}
+
+func (a *AnalyzedSchema) isArrayType() bool {
+	return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
+}
diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore
new file mode 100644
index 000000000000..dd91ed6a04e6
--- /dev/null
+++
b/vendor/github.com/go-openapi/errors/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml new file mode 100644 index 000000000000..6badaf1549f8 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -0,0 +1,20 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/errors/.travis.yml b/vendor/github.com/go-openapi/errors/.travis.yml new file mode 100644 index 000000000000..ba8a6d5918fc --- /dev/null +++ b/vendor/github.com/go-openapi/errors/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: gZGp9NaHxi7zawlXJXKY92BGeDR1x0tbIcTyU5nMKLq0fhIaiEBJEeALwZ4VgqsSv3DytSSF5mLH8fevAM3ixE6hxjKQ+lQuf7V/w3btCN1CSWgoua5LOh1kTnqZQtJuRvO4pzoJcT3bJWBsVZ07VGNVzzJEy/zAKCHFqBUCXShw7QemlLBcYWFNqveTlvDIfCzvouoLnPoXwxEpkjxe9uz/ZKZgAnup/fXjC8RFctmgCnkCyvJTk0Y/fZCsufixJrJhshBWTnlrFCzRmgNkz2d+i1Ls3+MJ5EJJ2Tx/A5S63dL49J1f9Kr0AKHADmulSy8JNzIckKwbyFMYUecrsW+Lsu9DhnVMy1jj5pKsJDLRi2iIU3fXTMWbcyQbXjbbnBO2mPdP3Tzme75y4D9fc8hUPeyqVv2BU26NEbQ7EF2pKJ93OXvci7HlwRBgdJa8j6mP2LEDClcPQW00g7N/OZe0cTOMa8L5AwiBlbArwqt9wv6YLJoTG0wpDhzWsFvbCg5bJxe28Yn3fIDD0Lk1I7iSnBbp/5gzF19jmxqvcT8tHRkDL4xfjbENFTZjA5uB4Z4pj4WSyWQILLV/Jwhe3fi9uQwdviFHfj5pnVrmNUiGSOQL672K5wl2c3E9mGwejvsu2dfEz28n7Y/FUnOpY3/cBS0n27JJaerS0zMKNLE= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md new file mode 100644 index 000000000000..0ce50b23b2e9 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/README.md @@ -0,0 +1,8 @@ +# OpenAPI errors [![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/errors?status.svg)](http://godoc.org/github.com/go-openapi/errors) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) + +Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go new file mode 100644 index 000000000000..7667cee76c9c --- /dev/null +++ b/vendor/github.com/go-openapi/errors/api.go @@ -0,0 +1,164 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" +) + +// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
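+// For example, a (hypothetical) error created with New(601, "oops") carries a
+// code outside the valid HTTP range, so ServeError would reply with 422.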
+var DefaultHTTPCode = http.StatusUnprocessableEntity
+
+// Error represents the error interface that all swagger framework errors implement
+type Error interface {
+	error
+	Code() int32
+}
+
+type apiError struct {
+	code    int32
+	message string
+}
+
+func (a *apiError) Error() string {
+	return a.message
+}
+
+func (a *apiError) Code() int32 {
+	return a.code
+}
+
+// New creates a new API error with a code and a message
+func New(code int32, message string, args ...interface{}) Error {
+	if len(args) > 0 {
+		return &apiError{code, fmt.Sprintf(message, args...)}
+	}
+	return &apiError{code, message}
+}
+
+// NotFound creates a new not found error
+func NotFound(message string, args ...interface{}) Error {
+	if message == "" {
+		message = "Not found"
+	}
+	return New(http.StatusNotFound, fmt.Sprintf(message, args...))
+}
+
+// NotImplemented creates a new not implemented error
+func NotImplemented(message string) Error {
+	return New(http.StatusNotImplemented, message)
+}
+
+// MethodNotAllowedError represents an error for when the path matches but the method doesn't
+type MethodNotAllowedError struct {
+	code    int32
+	Allowed []string
+	message string
+}
+
+func (m *MethodNotAllowedError) Error() string {
+	return m.message
+}
+
+// Code the error code
+func (m *MethodNotAllowedError) Code() int32 {
+	return m.code
+}
+
+func errorAsJSON(err Error) []byte {
+	b, _ := json.Marshal(struct {
+		Code    int32  `json:"code"`
+		Message string `json:"message"`
+	}{err.Code(), err.Error()})
+	return b
+}
+
+func flattenComposite(errs *CompositeError) *CompositeError {
+	var res []error
+	for _, er := range errs.Errors {
+		switch e := er.(type) {
+		case *CompositeError:
+			if len(e.Errors) > 0 {
+				flat := flattenComposite(e)
+				if len(flat.Errors) > 0 {
+					res = append(res, flat.Errors...)
+				}
+			}
+		default:
+			if e != nil {
+				res = append(res, e)
+			}
+		}
+	}
+	return CompositeValidationError(res...)
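+	// e.g. a (hypothetical) CompositeError{A, CompositeError{B, C}} comes out
+	// as CompositeError{A, B, C}: nil members and empty nested composites were
+	// dropped in the loop above.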
+}
+
+// MethodNotAllowed creates a new method not allowed error
+func MethodNotAllowed(requested string, allow []string) Error {
+	msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
+	return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg}
+}
+
+// ServeError the error handler interface implementation
+func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
+	rw.Header().Set("Content-Type", "application/json")
+	switch e := err.(type) {
+	case *CompositeError:
+		er := flattenComposite(e)
+		// strips composite errors to first element only
+		if len(er.Errors) > 0 {
+			ServeError(rw, r, er.Errors[0])
+		} else {
+			// guard against empty CompositeError (invalid construct)
+			ServeError(rw, r, nil)
+		}
+	case *MethodNotAllowedError:
+		rw.Header().Add("Allow", strings.Join(e.Allowed, ","))
+		rw.WriteHeader(asHTTPCode(int(e.Code())))
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(e))
+		}
+	case Error:
+		value := reflect.ValueOf(e)
+		if value.Kind() == reflect.Ptr && value.IsNil() {
+			rw.WriteHeader(http.StatusInternalServerError)
+			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
+			return
+		}
+		rw.WriteHeader(asHTTPCode(int(e.Code())))
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(e))
+		}
+	case nil:
+		rw.WriteHeader(http.StatusInternalServerError)
+		_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
+	default:
+		rw.WriteHeader(http.StatusInternalServerError)
+		if r == nil || r.Method != http.MethodHead {
+			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error())))
+		}
+	}
+}
+
+func asHTTPCode(input int) int {
+	if input >= 600 {
+		return DefaultHTTPCode
+	}
+	return input
+}
diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go
new file mode 100644
index 000000000000..0545b501bd7c
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/auth.go
@@ -0,0 +1,22 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import "net/http"
+
+// Unauthenticated returns an unauthenticated error
+func Unauthenticated(scheme string) Error {
+	return New(http.StatusUnauthorized, "unauthenticated for %s", scheme)
+}
diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go
new file mode 100644
index 000000000000..963d42740784
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/doc.go
@@ -0,0 +1,28 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+
+Package errors provides an Error interface and several concrete types
+implementing this interface to manage API errors and JSON-schema validation
+errors.
+
+A middleware handler ServeError() is provided to serve the error types
+it defines.
+
+It is used throughout the various go-openapi toolkit libraries
+(https://github.com/go-openapi).
+
+*/
+package errors
diff --git a/vendor/github.com/go-openapi/errors/go.mod b/vendor/github.com/go-openapi/errors/go.mod
new file mode 100644
index 000000000000..084143001f02
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/go.mod
@@ -0,0 +1,6 @@
+module github.com/go-openapi/errors
+
+require (
+	github.com/stretchr/objx v0.2.0 // indirect
+	github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/go-openapi/errors/go.sum b/vendor/github.com/go-openapi/errors/go.sum
new file mode 100644
index 000000000000..e7314e279fb5
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/go.sum
@@ -0,0 +1,9 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go
new file mode 100644
index 000000000000..0360c094ea31
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/headers.go
@@ -0,0 +1,85 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package errors + +import ( + "fmt" + "net/http" +) + +// Validation represents a failure of a precondition +type Validation struct { + code int32 + Name string + In string + Value interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// ValidateName produces an error message name for an aliased property +func (e *Validation) ValidateName(name string) *Validation { + if e.Name == "" && name != "" { + e.Name = name + e.message = name + e.message + } + return e +} + +const ( + contentTypeFail = `unsupported media type %q, only %v are allowed` + responseFormatFail = `unsupported media type requested, only %v are available` +) + +// InvalidContentType error for an invalid content type +func InvalidContentType(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusUnsupportedMediaType, + Name: "Content-Type", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(contentTypeFail, value, allowed), + } +} + +// InvalidResponseFormat error for an unacceptable response format request +func InvalidResponseFormat(value string, allowed []string) *Validation { + values := make([]interface{}, 0, len(allowed)) + for _, v := range allowed { + values = append(values, v) + } + return &Validation{ + code: http.StatusNotAcceptable, + Name: "Accept", + In: "header", + Value: value, + Values: values, + message: fmt.Sprintf(responseFormatFail, allowed), + } +} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go new file mode 100644 index 000000000000..6390d4636aa3 --- /dev/null +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -0,0 +1,51 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package errors
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// APIVerificationFailed is an error that contains all the missing info for a mismatched section
+// between the API registrations and the API spec
+type APIVerificationFailed struct {
+	Section              string
+	MissingSpecification []string
+	MissingRegistration  []string
+}
+
+// Error reports the registrations missing from the spec and the spec items
+// missing a registration
+func (v *APIVerificationFailed) Error() string {
+	buf := bytes.NewBuffer(nil)
+
+	hasRegMissing := len(v.MissingRegistration) > 0
+	hasSpecMissing := len(v.MissingSpecification) > 0
+
+	if hasRegMissing {
+		buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section))
+	}
+
+	if hasRegMissing && hasSpecMissing {
+		buf.WriteString("\n")
+	}
+
+	if hasSpecMissing {
+		buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section))
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go
new file mode 100644
index 000000000000..1bae87302a6b
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/parsing.go
@@ -0,0 +1,59 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import "fmt"
+
+// ParseError represents a parsing error
+type ParseError struct {
+	code    int32
+	Name    string
+	In      string
+	Value   string
+	Reason  error
+	message string
+}
+
+func (e *ParseError) Error() string {
+	return e.message
+}
+
+// Code returns the http status code for this error
+func (e *ParseError) Code() int32 {
+	return e.code
+}
+
+const (
+	parseErrorTemplContent     = `parsing %s %s from %q failed, because %s`
+	parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
+)
+
+// NewParseError creates a new parse error
+func NewParseError(name, in, value string, reason error) *ParseError {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
+	} else {
+		msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
+	}
+	return &ParseError{
+		code:    400,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Reason:  reason,
+		message: msg,
+	}
+}
diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go
new file mode 100644
index 000000000000..14fb2c5f116d
--- /dev/null
+++ b/vendor/github.com/go-openapi/errors/schema.go
@@ -0,0 +1,562 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "fmt" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maxIncFail = "%s in %s should be less than or equal to %v" + maxExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maxItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maxIncFailNoIn = "%s should be less than or equal to %v" + maxExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maxItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = "%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = 422 + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = 600 + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode +) + +// CompositeError is an error that groups several errors together +type CompositeError struct { + Errors []error + code int32 + message string +} + +// Code for this error +func (c 
*CompositeError) Code() int32 {
+	return c.code
+}
+
+func (c *CompositeError) Error() string {
+	if len(c.Errors) > 0 {
+		msgs := []string{c.message + ":"}
+		for _, e := range c.Errors {
+			msgs = append(msgs, e.Error())
+		}
+		return strings.Join(msgs, "\n")
+	}
+	return c.message
+}
+
+// CompositeValidationError an error to wrap a group of other errors
+func CompositeValidationError(errors ...error) *CompositeError {
+	return &CompositeError{
+		code:    CompositeErrorCode,
+		Errors:  append([]error{}, errors...),
+		message: "validation failure list",
+	}
+}
+
+// FailedAllPatternProperties an error for when a property fails to match any pattern property
+func FailedAllPatternProperties(name, in, key string) *Validation {
+	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+	}
+	return &Validation{
+		code:    FailedAllPatternPropsCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// PropertyNotAllowed an error for a property that is not allowed on an object
+func PropertyNotAllowed(name, in, key string) *Validation {
+	msg := fmt.Sprintf(unallowedProperty, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+	}
+	return &Validation{
+		code:    UnallowedPropertyCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooFewProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooFewPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, n int64) *Validation {
+	msg := fmt.Sprintf(tooManyProperties, name, in, n)
+	if in == "" {
+		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
+	}
+	return &Validation{
+		code:    TooManyPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   n,
+		message: msg,
+	}
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+	msg := fmt.Sprintf(noAdditionalItems, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+	}
+	return &Validation{
+		code:    NoAdditionalItemsCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   format,
+		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+	}
+}
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Value:   typeName,
+		message: fmt.Sprintf(invalidType, typeName),
+	}
+}
+
+// InvalidType creates an error for when the type is invalid
+func InvalidType(name, in, typeName string, value interface{}) *Validation {
+	var message string
+
+	if in != "" {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFail, name, in, typeName)
+		}
+	} else {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFailNoIn, name, typeName)
+		}
+	}
+
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// DuplicateItems error for when an array contains duplicates
+func DuplicateItems(name, in string) *Validation {
+	msg := fmt.Sprintf(uniqueFail, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(uniqueFailNoIn, name)
+	}
+	return &Validation{
+		code:    UniqueFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooManyItems error for when an array contains too many items
+func TooManyItems(name, in string, max int64) *Validation {
+	msg := fmt.Sprintf(maxItemsFail, name, in, max)
+	if in == "" {
+		msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
+	}
+
+	return &Validation{
+		code:    MaxItemsFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooFewItems error for when an array contains too few items
+func TooFewItems(name, in string, min int64) *Validation {
+	msg := fmt.Sprintf(minItemsFail, name, in, min)
+	if in == "" {
+		msg = fmt.Sprintf(minItemsFailNoIn, name, min)
+	}
+	return &Validation{
+		code:    MinItemsFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, max int64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   max,
+		message: message,
+	}
+}
+
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, max uint64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   max,
+		message: message,
+	}
+}
+
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, max float64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   max,
+		message: message,
+	}
+}
+
+// ExceedsMinimumInt error for when minimum validation fails
+func ExceedsMinimumInt(name, in string, min int64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   min,
+		message: message,
+	}
+}
+
+// ExceedsMinimumUint error for when minimum validation fails
+func ExceedsMinimumUint(name, in string, min uint64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   min,
+		message: message,
+	}
+}
+
+// ExceedsMinimum error for when minimum validation fails
+func ExceedsMinimum(name, in string, min float64, exclusive bool) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   min,
+		message: message,
+	}
+}
+
+// NotMultipleOf error for when multiple of validation fails
+func NotMultipleOf(name, in string, multiple interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple)
+	} else {
+		msg = fmt.Sprintf(multipleOfFail, name, in, multiple)
+	}
+	return &Validation{
+		code:    MultipleOfFailCode,
+		Name:    name,
+		In:      in,
+		Value:   multiple,
+		message: msg,
+	}
+}
+
+// EnumFail error for when an enum validation fails
+func EnumFail(name, in string, value interface{}, values []interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(enumFailNoIn, name, values)
+	} else {
+		msg = fmt.Sprintf(enumFail, name, in, values)
+	}
+
+	return &Validation{
+		code:    EnumFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Values:  values,
+		message: msg,
+	}
+}
+
+// Required error for when a value is missing
+func Required(name, in string) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(requiredFailNoIn, name)
+	} else {
+		msg = fmt.Sprintf(requiredFail, name, in)
+	}
+	return &Validation{
+		code:    RequiredFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooLong error for when a string is too long
+func TooLong(name, in string, max int64) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(tooLongMessageNoIn, name, max)
+	} else {
+		msg = fmt.Sprintf(tooLongMessage, name, in, max)
+	}
+	return &Validation{
+		code:    TooLongFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooShort error for when a string is too short
+func TooShort(name, in string, min int64) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(tooShortMessageNoIn, name, min)
+	} else {
+		msg = fmt.Sprintf(tooShortMessage, name, in, min)
+	}
+
+	return &Validation{
+		code:    TooShortFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// FailedPattern error for when a string fails a regex pattern match.
+// The pattern returned is the ECMA-syntax version of the pattern, not the Go version.
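+// For example (illustrative values), FailedPattern("id", "query", "^[a-z]+$")
+// produces a validation error whose message reads:
+//	id in query should match '^[a-z]+$'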
+func FailedPattern(name, in, pattern string) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 000000000000..769c244007b5 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml new file mode 100644 index 000000000000..9aef9184e86d --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 000000000000..813788aff1c2 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,15 @@ +# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. 
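For reviewers unfamiliar with this newly vendored package, here is a minimal usage sketch of the `jsonpointer` API added in this diff (`New`, `Get`, and the `~0`/`~1` token escaping implemented at the bottom of `pointer.go` below). The document value and pointer strings are illustrative only; nothing here is part of the change itself.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// A document as decoded from JSON: nested maps and slices.
	doc := map[string]interface{}{
		"a/b": map[string]interface{}{
			"items": []interface{}{"first", "second"},
		},
	}

	// A "/" inside a key must be escaped as ~1 (and "~" as ~0),
	// per the escaping table in pointer.go.
	p, err := jsonpointer.New("/a~1b/items/1")
	if err != nil {
		panic(err)
	}

	// Get returns the value, its reflect.Kind, and an error.
	v, kind, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(v, kind) // second string

	// Escape/Unescape round-trip a single reference token.
	fmt.Println(jsonpointer.Escape("a/b"))    // a~1b
	fmt.Println(jsonpointer.Unescape("a~1b")) // a/b
}
```

An empty pointer string is also valid and resolves to the whole document, which matches the `IsEmpty` behavior defined further down in `pointer.go`.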
diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod new file mode 100644 index 000000000000..422045df2d86 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.mod @@ -0,0 +1,6 @@ +module github.com/go-openapi/jsonpointer + +require ( + github.com/go-openapi/swag v0.19.2 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum new file mode 100644 index 000000000000..f5e28beb4bf7 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/go.sum @@ -0,0 +1,22 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 000000000000..fe2d6ee574f1 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,390 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package jsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` + + invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator +) + +var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() +var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (interface{}, error) +} + +// JSONSetable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONSetable interface { + JSONSet(string, interface{}) error +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + + var p Pointer + err := p.parse(jsonPointerString) + return p, err + +} + +// Pointer the json pointer reprsentation +type Pointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.New(invalidStart) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + return p.get(document, swag.DefaultJSONNameProvider) +} + +// Set uses the pointer to set a value from a JSON document +func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { + return document, p.set(document, value, swag.DefaultJSONNameProvider) +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) +} + +// SetForToken gets a value for a json pointer token 1 level deep +func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { + return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) +} + +func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if 
mv.IsValid() && !swag.IsZero(mv) { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { + rValue := reflect.Indirect(reflect.ValueOf(node)) + switch rValue.Kind() { + + case reflect.Struct: + if ns, ok := node.(JSONSetable); ok { // pointer impl + return ns.JSONSet(decodedToken, data) + } + + if rValue.Type().Implements(jsonSetableType) { + return node.(JSONSetable).JSONSet(decodedToken, data) + } + + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.IsValid() { + fld.Set(reflect.ValueOf(data)) + } + return nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if !elem.CanSet() { + return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) + } + elem.Set(reflect.ValueOf(data)) + return nil + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node, kind = r, knd + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { + knd := reflect.ValueOf(node).Kind() + + if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + } + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + // Full document when empty + if len(p.referenceTokens) == 0 { + return nil + } + + lastI := len(p.referenceTokens) - 1 + for i, token := range p.referenceTokens { + isLastToken := i == lastI + decodedToken := Unescape(token) + + if isLastToken { + + return setSingleImpl(node, data, decodedToken, nameProvider) + } + + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if 
err != nil { + return err + } + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = r + continue + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { + node = fld.Addr().Interface() + continue + } + node = fld.Interface() + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + + if !mv.IsValid() { + return fmt.Errorf("object has no key %q", decodedToken) + } + if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { + node = mv.Addr().Interface() + continue + } + node = mv.Interface() + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { + node = elem.Addr().Interface() + continue + } + node = elem.Interface() + + default: + return fmt.Errorf("invalid token reference %q", decodedToken) + } + + } + + return nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... 
and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + return step2 +} diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 000000000000..769c244007b5 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml new file mode 100644 index 000000000000..40b90757d894 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 000000000000..66345f4c61fb --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,15 @@ +# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) +An implementation of JSON Reference - Go language + +## Status +Work in progress ( 90% done ) + +## Dependencies +https://github.com/go-openapi/jsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod new file mode 100644 index 000000000000..35adddfe40f0 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.mod @@ -0,0 +1,10 @@ +module github.com/go-openapi/jsonreference + +require ( + github.com/PuerkitoBio/purell v1.1.1 + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/go-openapi/jsonpointer v0.19.2 + github.com/stretchr/testify v1.3.0 + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 // indirect + golang.org/x/text v0.3.2 // indirect +) diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum new file mode 100644 index 000000000000..f1a7a34e3ce6 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/go.sum @@ -0,0 +1,36 @@ +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 000000000000..3bc0a6e26f8b --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,156 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/PuerkitoBio/purell" + "github.com/go-openapi/jsonpointer" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore new file mode 100644 index 000000000000..e4f15f17bfc2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.gitignore @@ -0,0 +1,4 @@ +secrets.yml +coverage.out +profile.cov +profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 000000000000..1932914e6d1a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,22 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml new file mode 100644 index 000000000000..8a7e05d911c6 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
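To round out the jsonreference portion of this diff, here is a minimal sketch of how the `Ref` API added in `reference.go` above fits together (`New`/`MustCreateRef`, the `Has*` flags, `GetPointer`, and `Inherits`). The URLs are placeholders chosen for illustration, not anything referenced by this change.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// A full URL with a JSON pointer fragment.
	ref, err := jsonreference.New("http://example.com/schema.json#/definitions/user")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.HasFullURL, ref.IsCanonical()) // true true
	fmt.Println(ref.GetPointer().String())         // /definitions/user

	// A fragment-only reference; MustCreateRef panics on invalid input.
	frag := jsonreference.MustCreateRef("#/definitions/address")
	fmt.Println(frag.HasFragmentOnly) // true

	// Inherits resolves the child reference against the parent URL.
	resolved, err := ref.Inherits(frag)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // http://example.com/schema.json#/definitions/address
}
```

This is the dependency chain the loads package (whose config files follow) builds on: jsonreference resolves where a document lives, and jsonpointer resolves a location within it.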
diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md new file mode 100644 index 000000000000..071cf69ab97c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/README.md @@ -0,0 +1,7 @@ +# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/loads.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) + +Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 000000000000..3046da4cef39 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package loads provides document loading methods for swagger (OAI) specifications. + +It is used by other go-openapi packages to load and run analysis on local or remote spec documents. 
+ +*/ +package loads diff --git a/vendor/github.com/go-openapi/loads/go.mod b/vendor/github.com/go-openapi/loads/go.mod new file mode 100644 index 000000000000..e83c6ec30498 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/go.mod @@ -0,0 +1,9 @@ +module github.com/go-openapi/loads + +require ( + github.com/go-openapi/analysis v0.19.2 + github.com/go-openapi/spec v0.19.2 + github.com/go-openapi/swag v0.19.2 + github.com/stretchr/testify v1.3.0 + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/go-openapi/loads/go.sum b/vendor/github.com/go-openapi/loads/go.sum new file mode 100644 index 000000000000..b0658b2cd478 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/go.sum @@ -0,0 +1,79 @@ +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.19.0 h1:sYEyyO7OKQvJX0z4OyHWoGt0uLuALxB/ZJ4Jb3I6KNU= +github.com/go-openapi/analysis v0.19.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= 
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go new file mode 100644 index 000000000000..e4b4a3cf7638 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -0,0 +1,298 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package loads + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "fmt" + "net/url" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/spec" + "github.com/go-openapi/swag" +) + +// JSONDoc loads a json document from either a file or a remote url +func JSONDoc(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil +} + +// DocLoader represents a doc loader type +type DocLoader func(string) (json.RawMessage, error) + +// DocMatcher represents a predicate to check if a loader matches +type DocMatcher func(string) bool + +var ( + loaders *loader + defaultLoader *loader +) + +func init() { + defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} + loaders = defaultLoader + spec.PathLoader = loaders.Fn + AddLoader(swag.YAMLMatcher, swag.YAMLDoc) + + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) + //gob.Register(spec.Refable{}) +} + +// AddLoader for a document +func AddLoader(predicate DocMatcher, load DocLoader) { + prev := loaders + loaders = &loader{ + Match: predicate, + Fn: load, + Next: prev, + } + spec.PathLoader = loaders.Fn +} + +type loader struct { + Fn DocLoader + Match DocMatcher + Next *loader +} + +// JSONSpec loads a spec from a json document +func JSONSpec(path string) (*Document, error) { + data, err := JSONDoc(path) + if err != nil { + return nil, err + } + // convert to json + return Analyzed(data, "") +} + +// Document represents a swagger spec document +type Document struct { + // specAnalyzer + Analyzer *analysis.Spec + spec *spec.Swagger + specFilePath string + origSpec *spec.Swagger + schema *spec.Schema + raw json.RawMessage +} + +// Embedded returns a Document based on embedded specs. 
No analysis is required +func Embedded(orig, flat json.RawMessage) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + }, nil +} + +// Spec loads a new spec document +func Spec(path string) (*Document, error) { + specURL, err := url.Parse(path) + if err != nil { + return nil, err + } + var lastErr error + for l := loaders.Next; l != nil; l = l.Next { + if loaders.Match(specURL.Path) { + b, err2 := loaders.Fn(path) + if err2 != nil { + lastErr = err2 + continue + } + doc, err3 := Analyzed(b, "") + if err3 != nil { + return nil, err3 + } + if doc != nil { + doc.specFilePath = path + } + return doc, nil + } + } + if lastErr != nil { + return nil, lastErr + } + b, err := defaultLoader.Fn(path) + if err != nil { + return nil, err + } + + document, err := Analyzed(b, "") + if document != nil { + document.specFilePath = path + } + + return document, err +} + +// Analyzed creates a new analyzed spec document +func Analyzed(data json.RawMessage, version string) (*Document, error) { + if version == "" { + version = "2.0" + } + if version != "2.0" { + return nil, fmt.Errorf("spec version %q is not supported", version) + } + + raw := data + trimmed := bytes.TrimSpace(data) + if len(trimmed) > 0 { + if trimmed[0] != '{' && trimmed[0] != '[' { + yml, err := swag.BytesToYAMLDoc(trimmed) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + d, err := swag.YAMLToJSON(yml) + if err != nil { + return nil, fmt.Errorf("analyzed: %v", err) + } + raw = d + } + } + + swspec := new(spec.Swagger) + if err := json.Unmarshal(raw, swspec); err != nil { + return nil, err + } + + origsqspec, err := cloneSpec(swspec) + if err != nil { + return nil, err + } + + d := &Document{ + Analyzer: analysis.New(swspec), + schema: spec.MustLoadSwagger20Schema(), + spec: swspec, + raw: raw, + origSpec: origsqspec, + } + return d, nil +} + +// Expanded expands the ref fields in the spec document and returns a new spec document +func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { + swspec := new(spec.Swagger) + if err := json.Unmarshal(d.raw, swspec); err != nil { + return nil, err + } + + var expandOptions *spec.ExpandOptions + if len(options) > 0 { + expandOptions = options[0] + } else { + expandOptions = &spec.ExpandOptions{ + RelativeBase: d.specFilePath, + } + } + + if err := spec.ExpandSpec(swspec, expandOptions); err != nil { + return nil, err + } + + dd := &Document{ + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, + } + return dd, nil +} + +// BasePath the base path for this spec +func (d *Document) BasePath() string { + return d.spec.BasePath +} + +// Version returns the version of this spec +func (d *Document) Version() string { + return d.spec.Swagger +} + +// Schema returns the swagger 2.0 schema +func (d *Document) Schema() *spec.Schema { + return d.schema +} + +// Spec returns the swagger spec object model +func (d *Document) Spec() *spec.Swagger { + return d.spec +} + +// Host returns the host for the API +func (d *Document) Host() string { + return d.spec.Host +} + +// Raw returns the raw swagger spec as json bytes +func (d *Document) Raw() json.RawMessage { + return d.raw +} + +// OrigSpec yields the original spec +func (d 
*Document) OrigSpec() *spec.Swagger { + return d.origSpec +} + +// ResetDefinitions gives a shallow copy with the models reset +func (d *Document) ResetDefinitions() *Document { + defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) + for k, v := range d.origSpec.Definitions { + defs[k] = v + } + + d.spec.Definitions = defs + return d +} + +// Pristine creates a new pristine document instance based on the input data +func (d *Document) Pristine() *Document { + dd, _ := Analyzed(d.Raw(), d.Version()) + return dd +} + +// SpecFilePath returns the file path of the spec if one is defined +func (d *Document) SpecFilePath() string { + return d.specFilePath +} + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore new file mode 100644 index 000000000000..fea8b84eca99 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.gitignore @@ -0,0 +1,5 @@ +secrets.yml +coverage.out +*.cov +*.out +playground diff --git a/vendor/github.com/go-openapi/runtime/.travis.yml b/vendor/github.com/go-openapi/runtime/.travis.yml new file mode 100644 index 000000000000..2fc7b58ff20b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: EmObnQuM9Mw8J9vpFaKKHqSMN4Wsr/A9+v7ewAD5cEhA0T1P4m7MbJMiJOhxUhj/X+BFh2DamW+P2lT8mybj5wg8wnkQ2BteKA8Tawi6f9PRw2NRheO8tAi8o/npLnlmet0kc93mn+oLuqHw36w4+j5mkOl2FghkfGiUVhwrhkCP7KXQN+3TU87e+/HzQumlJ3nsE+6terVxkH3PmaUTsS5ONaODZfuxFpfb7RsoEl3skHf6d+tr+1nViLxxly7558Nc33C+W1mr0qiEvMLZ+kJ/CpGWBJ6CUJM3jm6hNe2eMuIPwEK2hxZob8c7n22VPap4K6a0bBRoydoDXaba+2sD7Ym6ivDO/DVyL44VeBBLyIiIBylDGQdZH+6SoWm90Qe/i7tnY/T5Ao5igT8f3cfQY1c3EsTfqmlDfrhmACBmwSlgkdVBLTprHL63JMY24LWmh4jhxsmMRZhCL4dze8su1w6pLN/pD1pGHtKYCEVbdTmaM3PblNRFf12XB7qosmQsgUndH4Vq3bTbU0s1pKjeDhRyLvFzvR0TBbo0pDLEoF1A/i5GVFWa7yLZNUDudQERRh7qv/xBl2excIaQ1sV4DSVm7bAE9l6Kp+yeHQJW2uN6Y3X8wu9gB9nv9l5HBze7wh8KE6PyWAOLYYqZg9/sAtsv/2GcQqXcKFF1zcA= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
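For orientation, a minimal sketch of how the vendored `loads` package above is typically consumed. This is illustrative only and not part of the diff; the spec path is hypothetical, and every call used here (`Spec`, `Version`, `BasePath`, `Expanded`, `Spec().Definitions`) appears in the spec.go hunk above.

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	// Spec loads a Swagger 2.0 document from a local file or remote URL;
	// YAML input is handled by the loader registered in the package's init().
	doc, err := loads.Spec("./swagger.json") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	log.Println("version:", doc.Version(), "basePath:", doc.BasePath())

	// Expanded resolves $ref pointers and returns a new Document,
	// leaving the original analyzed document untouched.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("definitions:", len(expanded.Spec().Definitions))
}
```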
diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/github.com/go-openapi/runtime/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md new file mode 100644 index 000000000000..5b1ec6494541 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -0,0 +1,7 @@ +# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) + +# golang Open-API toolkit - runtime + +The runtime component for use in code generation or for untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go new file mode 100644 index 000000000000..4459025b9279 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -0,0 +1,155 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +func defaultCloser() error { return nil } + +type byteStreamOpt func(opts *byteStreamOpts) + +// ClosesStream when the bytestream consumer or producer is finished +func ClosesStream(opts *byteStreamOpts) { + opts.Close = true +} + +type byteStreamOpts struct { + Close bool +} + +// ByteStreamConsumer creates a consumer for byte streams, +// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, +// and reads from the provided reader +func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("ByteStreamConsumer requires a reader") // early exit + } + + close := defaultCloser + if vals.Close { + if cl, ok := reader.(io.Closer); ok { + close = cl.Close + } + } + defer close() + + if wrtr, ok := data.(io.Writer); ok { + _, err := io.Copy(wrtr, reader) + return err + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + if bu, ok := data.(encoding.BinaryUnmarshaler); ok { + return bu.UnmarshalBinary(b) + } + + if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + v.SetBytes(b) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", + data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") + }) +} + +// ByteStreamProducer creates a producer for byte streams, +// takes a Reader/BinaryMarshaler interface or binary slice, +// and writes to a writer (essentially a pipe) +func ByteStreamProducer(opts ...byteStreamOpt) Producer { + var vals byteStreamOpts + for _, opt := range opts { + opt(&vals) + } + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("ByteStreamProducer requires a writer") // early exit + } + close := defaultCloser + if vals.Close { + if cl, ok := writer.(io.Closer); ok { + close = cl.Close + } + } + defer close() + + if rc, ok := data.(io.ReadCloser); ok { + defer rc.Close() + } + + if rdr, ok := data.(io.Reader); ok { + _, err := io.Copy(writer, rdr) + return err + } + + if bm, ok := data.(encoding.BinaryMarshaler); ok { + bytes, err := bm.MarshalBinary() + if err != nil { + return err + } + + _, err = writer.Write(bytes) + return err + } + + if data != nil { + if e, ok := data.(error); ok { + _, err := writer.Write([]byte(e.Error())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + _, err := writer.Write(v.Bytes()) + return err + } + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + } + + return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", + data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") + }) +} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go new file mode 100644 index 000000000000..c6c97d9a7c34 --- /dev/null +++
b/vendor/github.com/go-openapi/runtime/client_auth_info.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "github.com/go-openapi/strfmt" + +// A ClientAuthInfoWriterFunc converts a function to a request writer interface +type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error + +// AuthenticateRequest adds authentication data to the request +func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// A ClientAuthInfoWriter implementor knows how to write authentication info to a request +type ClientAuthInfoWriter interface { + AuthenticateRequest(ClientRequest, strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go new file mode 100644 index 000000000000..fa21eacf3301 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -0,0 +1,41 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "context" + "net/http" +) + +// ClientOperation represents the context for a swagger operation to be submitted to the transport +type ClientOperation struct { + ID string + Method string + PathPattern string + ProducesMediaTypes []string + ConsumesMediaTypes []string + Schemes []string + AuthInfo ClientAuthInfoWriter + Params ClientRequestWriter + Reader ClientResponseReader + Context context.Context + Client *http.Client +} + +// A ClientTransport implementor knows how to submit Request objects to some destination +type ClientTransport interface { + //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + Submit(*ClientOperation) (interface{}, error) +} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go new file mode 100644 index 000000000000..904196ae3e6e --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/go-openapi/strfmt" +) + +// ClientRequestWriterFunc converts a function to a request writer interface +type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error + +// WriteToRequest adds data to the request +func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { + return fn(req, reg) +} + +// ClientRequestWriter is an interface for things that know how to write to a request +type ClientRequestWriter interface { + WriteToRequest(ClientRequest, strfmt.Registry) error +} + +// ClientRequest is an interface for things that know how to +// add information to a swagger client request +type ClientRequest interface { + SetHeaderParam(string, ...string) error + + GetHeaderParams() http.Header + + SetQueryParam(string, ...string) error + + SetFormParam(string, ...string) error + + SetPathParam(string, string) error + + GetQueryParams() url.Values + + SetFileParam(string, ...NamedReadCloser) error + + SetBodyParam(interface{}) error + + SetTimeout(time.Duration) error + + GetMethod() string + + GetPath() string + + GetBody() []byte + + GetBodyParam() interface{} + + GetFileParam() map[string][]NamedReadCloser +} + +// NamedReadCloser represents a named ReadCloser interface +type NamedReadCloser interface { + io.ReadCloser + Name() string +} + +// NamedReader creates a NamedReadCloser for use as file upload +func NamedReader(name string, rdr io.Reader) NamedReadCloser { + rc, ok := rdr.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(rdr) + } + return &namedReadCloser{ + name: name, + cr: rc, + } +} + +type namedReadCloser struct { + name string + cr io.ReadCloser +} + +func (n *namedReadCloser) Close() error { + return n.cr.Close() +} +func (n *namedReadCloser) Read(p []byte) (int, error) { + return n.cr.Read(p) +} +func (n *namedReadCloser) Name() string { + return n.name +} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go new file mode 100644 index 000000000000..729e18b2283a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -0,0 +1,63 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
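In the same spirit, a hedged sketch of the byte-stream consumer and the `NamedReader` helper from the hunks above. It assumes the `Consumer` interface (with a `Consume(io.Reader, interface{}) error` method) from the package's interfaces.go, which is not part of this diff.

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// ByteStreamConsumer copies a raw stream into any io.Writer
	// (it also accepts an encoding.BinaryUnmarshaler or a *[]byte).
	consumer := runtime.ByteStreamConsumer()

	var buf bytes.Buffer
	if err := consumer.Consume(strings.NewReader("raw payload"), &buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String()) // prints: raw payload

	// NamedReader wraps a plain io.Reader so it can be passed as a
	// file parameter through ClientRequest.SetFileParam.
	f := runtime.NamedReader("report.csv", strings.NewReader("a,b,c"))
	fmt.Println(f.Name()) // prints: report.csv
}
```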
+ +package runtime + +import ( + "fmt" + "io" +) + +// A ClientResponse represents a client response +// This bridges between responses obtained from different transports +type ClientResponse interface { + Code() int + Message() string + GetHeader(string) string + Body() io.ReadCloser +} + +// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation +type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) + +// ReadResponse reads the response +func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { + return read(resp, consumer) +} + +// A ClientResponseReader is an interface for things that want to read a response. +// An application of this is to create structs from response values +type ClientResponseReader interface { + ReadResponse(ClientResponse, Consumer) (interface{}, error) +} + +// NewAPIError creates a new API error +func NewAPIError(opName string, payload interface{}, code int) *APIError { + return &APIError{ + OperationName: opName, + Response: payload, + Code: code, + } +} + +// APIError wraps an error model and captures the status code +type APIError struct { + OperationName string + Response interface{} + Code int +} + +func (a *APIError) Error() string { + return fmt.Sprintf("%s (status %d): %+v ", a.OperationName, a.Code, a.Response) +} diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go new file mode 100644 index 000000000000..a4de897adcdf --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/constants.go @@ -0,0 +1,47 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +const ( + // HeaderContentType represents a http content-type header, its value is supposed to be a mime type + HeaderContentType = "Content-Type" + + // HeaderTransferEncoding represents a http transfer-encoding header.
+ HeaderTransferEncoding = "Transfer-Encoding" + + // HeaderAccept the Accept header + HeaderAccept = "Accept" + + charsetKey = "charset" + + // DefaultMime the default fallback mime type + DefaultMime = "application/octet-stream" + // JSONMime the json mime type + JSONMime = "application/json" + // YAMLMime the yaml mime type + YAMLMime = "application/x-yaml" + // XMLMime the xml mime type + XMLMime = "application/xml" + // TextMime the text mime type + TextMime = "text/plain" + // HTMLMime the html mime type + HTMLMime = "text/html" + // CSVMime the csv mime type + CSVMime = "text/csv" + // MultipartFormMime the multipart form mime type + MultipartFormMime = "multipart/form-data" + // URLencodedFormMime the url encoded form mime type + URLencodedFormMime = "application/x-www-form-urlencoded" +) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go new file mode 100644 index 000000000000..d807bd915b44 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -0,0 +1,77 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding/csv" + "errors" + "io" +) + +// CSVConsumer creates a new CSV consumer +func CSVConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("CSVConsumer requires a reader") + } + + csvReader := csv.NewReader(reader) + writer, ok := data.(io.Writer) + if !ok { + return errors.New("data type must be io.Writer") + } + csvWriter := csv.NewWriter(writer) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} + +// CSVProducer creates a new CSV producer +func CSVProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("CSVProducer requires a writer") + } + + dataBytes, ok := data.([]byte) + if !ok { + return errors.New("data type must be byte array") + } + + csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) + records, err := csvReader.ReadAll() + if err != nil { + return err + } + csvWriter := csv.NewWriter(writer) + for _, r := range records { + if err := csvWriter.Write(r); err != nil { + return err + } + } + csvWriter.Flush() + return nil + }) +} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go new file mode 100644 index 000000000000..0d390cfd64c0 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/discard.go @@ -0,0 +1,9 @@ +package runtime + +import "io" + +// DiscardConsumer does absolutely nothing, it's a black hole. +var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) + +// DiscardProducer does absolutely nothing, it's a black hole. 
+var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go new file mode 100644 index 000000000000..85971c18c4b5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/runtime/flagext/byte_size.go b/vendor/github.com/go-openapi/runtime/flagext/byte_size.go new file mode 100644 index 000000000000..0f3eeba1f6f4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/flagext/byte_size.go @@ -0,0 +1,38 @@ +package flagext + +import ( + "github.com/docker/go-units" +) + +// ByteSize used to pass byte sizes to a go-flags CLI +type ByteSize int + +// MarshalFlag implements go-flags Marshaller interface +func (b ByteSize) MarshalFlag() (string, error) { + return units.HumanSize(float64(b)), nil +} + +// UnmarshalFlag implements go-flags Unmarshaller interface +func (b *ByteSize) UnmarshalFlag(value string) error { + sz, err := units.FromHumanSize(value) + if err != nil { + return err + } + *b = ByteSize(int(sz)) + return nil +} + +// String method for a bytesize (pflag value and stringer interface) +func (b ByteSize) String() string { + return units.HumanSize(float64(b)) +} + +// Set the value of this bytesize (pflag value interfaces) +func (b *ByteSize) Set(value string) error { + return b.UnmarshalFlag(value) +} + +// Type returns the type of the pflag value (pflag value interface) +func (b *ByteSize) Type() string { + return "byte-size" +} diff --git a/vendor/github.com/go-openapi/runtime/go.mod b/vendor/github.com/go-openapi/runtime/go.mod new file mode 100644 index 000000000000..ddf6f1613af5 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/go.mod @@ -0,0 +1,16 @@ +module github.com/go-openapi/runtime + +require ( + github.com/docker/go-units v0.4.0 + github.com/go-openapi/analysis v0.19.2 + github.com/go-openapi/errors v0.19.2 + github.com/go-openapi/loads v0.19.2 + github.com/go-openapi/spec v0.19.2 + github.com/go-openapi/strfmt v0.19.0 + github.com/go-openapi/swag v0.19.2 + github.com/go-openapi/validate v0.19.2 + github.com/stretchr/testify v1.3.0 + golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56 // indirect + golang.org/x/tools v0.0.0-20190617190820-da514acc4774 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/go-openapi/runtime/go.sum b/vendor/github.com/go-openapi/runtime/go.sum new file mode 100644 index 000000000000..bdb97c3a8a7a --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/go.sum @@ -0,0 
+1,133 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.17.2 h1:eYp14J1o8TTSCzndHBtsNuckikV1PfZOSnx4BcBeu0c= +github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.17.2 h1:azEQ8Fnx0jmtFF2fxsnmd6I0x6rsweUF63qqSO1NmKk= +github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.17.2 h1:3ekBy41gar/iJi2KSh/au/PrC2vpLr85upF/UZmm3W0= +github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= 
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.17.2 h1:lF3z7AH8dd0IKXc1zEBi1dj0B4XgVb5cVjn39dCK3Ls= +github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.17.2 h1:tEXYu6Xc0pevpzzQx5ghrMN9F7IVpN/+u4iD3rkYE5o= +github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= +github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.17.2 h1:2KDns36DMHXG9/iYkOjiX+/8fKK9GCU5ELZ+J6qcRVA= +github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= +github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.17.2 h1:lwFfiS4sv5DvOrsYDsYq4N7UU8ghXiYtPJ+VcQnC3Xg= +github.com/go-openapi/validate 
v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53 h1:kcXqo9vE6fsZY5X5Rd7R1l7fTgnWaDCVmln65REefiE= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go new file mode 100644 index 000000000000..4d111db4fec0 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/headers.go @@ -0,0 +1,45 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package runtime + +import ( + "mime" + "net/http" + + "github.com/go-openapi/errors" +) + +// ContentType parses a content type header +func ContentType(headers http.Header) (string, string, error) { + ct := headers.Get(HeaderContentType) + orig := ct + if ct == "" { + ct = DefaultMime + } + if ct == "" { + return "", "", nil + } + + mt, opts, err := mime.ParseMediaType(ct) + if err != nil { + return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) + } + + if cs, ok := opts[charsetKey]; ok { + return mt, cs, nil + } + + return mt, "", nil +} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go new file mode 100644 index 000000000000..65de0aa44b96 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/interfaces.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "io" + "net/http" + + "github.com/go-openapi/strfmt" +) + +// OperationHandlerFunc an adapter for a function to the OperationHandler interface +type OperationHandlerFunc func(interface{}) (interface{}, error) + +// Handle implements the operation handler interface +func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { + return s(data) +} + +// OperationHandler a handler for a swagger operation +type OperationHandler interface { + Handle(interface{}) (interface{}, error) +} + +// ConsumerFunc represents a function that can be used as a consumer +type ConsumerFunc func(io.Reader, interface{}) error + +// Consume consumes the reader into the data parameter +func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { + return fn(reader, data) +} + +// Consumer implementations know how to bind the values on the provided interface to +// data provided by the request body +type Consumer interface { + // Consume performs the binding of request values + Consume(io.Reader, interface{}) error +} + +// ProducerFunc represents a function that can be used as a producer +type ProducerFunc func(io.Writer, interface{}) error + +// Produce produces the response for the provided data +func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { + return f(writer, data) +} + +// Producer implementations know how to turn the provided interface into a valid +// HTTP response +type Producer interface { + // Produce writes to the http response + Produce(io.Writer, interface{}) error +} + +// AuthenticatorFunc turns a function into an authenticator +type AuthenticatorFunc func(interface{}) (bool, interface{}, error) + +// Authenticate authenticates the request with the provided data +func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { + return f(params) +} + +// Authenticator represents an authentication strategy +// implementations of Authenticator know how to authenticate the +// request data and translate that into a valid principal object or an error +type Authenticator interface 
{ + Authenticate(interface{}) (bool, interface{}, error) +} + +// AuthorizerFunc turns a function into an authorizer +type AuthorizerFunc func(*http.Request, interface{}) error + +// Authorize authorizes the processing of the request for the principal +func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { + return f(r, principal) +} + +// Authorizer represents an authorization strategy +// implementations of Authorizer know how to authorize the principal object +// using the request data and returns error if unauthorized +type Authorizer interface { + Authorize(*http.Request, interface{}) error +} + +// Validatable types implementing this interface allow customizing their validation +// this will be used instead of the reflective validation based on the spec document. +// the implementations are assumed to have been generated by the swagger tool so they should +// contain all the validations obtained from the spec +type Validatable interface { + Validate(strfmt.Registry) error +} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go new file mode 100644 index 000000000000..5a690559cc59 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/json.go @@ -0,0 +1,38 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONConsumer creates a new JSON consumer +func JSONConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := json.NewDecoder(reader) + dec.UseNumber() // preserve number formats + return dec.Decode(data) + }) +} + +// JSONProducer creates a new JSON producer +func JSONProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := json.NewEncoder(writer) + enc.SetEscapeHTML(false) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go new file mode 100644 index 000000000000..d62c1f708fa8 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/logger/logger.go @@ -0,0 +1,12 @@ +package logger + +import "os" + +type Logger interface { + Printf(format string, args ...interface{}) + Debugf(format string, args ...interface{}) +} + +func DebugEnabled() bool { + return os.Getenv("SWAGGER_DEBUG") != "" || os.Getenv("DEBUG") != "" +} diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go new file mode 100644 index 000000000000..f7e67ebb9e75 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/logger/standard.go @@ -0,0 +1,22 @@ +package logger + +import ( + "fmt" + "os" +) + +type StandardLogger struct{} + +func (StandardLogger) Printf(format string, args ...interface{}) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) 
+} + +func (StandardLogger) Debugf(format string, args ...interface{}) { + if len(format) == 0 || format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(os.Stderr, format, args...) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go new file mode 100644 index 000000000000..54a8c21f1550 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/context.go @@ -0,0 +1,590 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + stdContext "context" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/go-openapi/runtime/security" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/logger" + "github.com/go-openapi/runtime/middleware/untyped" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// Debug when true turns on verbose logging +var Debug = logger.DebugEnabled() +var Logger logger.Logger = logger.StandardLogger{} + +func debugLog(format string, args ...interface{}) { + if Debug { + Logger.Printf(format, args...) + } +} + +// A Builder can create middlewares +type Builder func(http.Handler) http.Handler + +// PassthroughBuilder returns the handler, aka the builder identity function +func PassthroughBuilder(handler http.Handler) http.Handler { return handler } + +// RequestBinder is an interface for types to implement +// when they want to be able to bind from a request +type RequestBinder interface { + BindRequest(*http.Request, *MatchedRoute) error +} + +// Responder is an interface for types to implement +// when they want to be considered for writing HTTP responses +type Responder interface { + WriteResponse(http.ResponseWriter, runtime.Producer) +} + +// ResponderFunc wraps a func as a Responder interface +type ResponderFunc func(http.ResponseWriter, runtime.Producer) + +// WriteResponse writes to the response +func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { + fn(rw, pr) +} + +// Context is a type safe wrapper around an untyped request context +// used throughout to store request context with the standard context attached +// to the http.Request +type Context struct { + spec *loads.Document + analyzer *analysis.Spec + api RoutableAPI + router Router +} + +type routableUntypedAPI struct { + api *untyped.API + hlock *sync.Mutex + handlers map[string]map[string]http.Handler + defaultConsumes string + defaultProduces string +} + +func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { + var handlers map[string]map[string]http.Handler + if spec == nil || api == nil { + return nil + } + analyzer := analysis.New(spec.Spec()) + for method, hls := range analyzer.Operations() { + um := strings.ToUpper(method) + for path, op := range hls { + schemes := analyzer.SecurityRequirementsFor(op) 
+ + if oh, ok := api.OperationHandlerFor(method, path); ok { + if handlers == nil { + handlers = make(map[string]map[string]http.Handler) + } + if b, ok := handlers[um]; !ok || b == nil { + handlers[um] = make(map[string]http.Handler) + } + + var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // lookup route info in the context + route, rCtx, _ := context.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + // bind and validate the request using reflection + var bound interface{} + var validation error + bound, r, validation = context.BindAndValidate(r, route) + if validation != nil { + context.Respond(w, r, route.Produces, route, validation) + return + } + + // actually handle the request + result, err := oh.Handle(bound) + if err != nil { + // respond with failure + context.Respond(w, r, route.Produces, route, err) + return + } + + // respond with success + context.Respond(w, r, route.Produces, route, result) + }) + + if len(schemes) > 0 { + handler = newSecureAPI(context, handler) + } + handlers[um][path] = handler + } + } + } + + return &routableUntypedAPI{ + api: api, + hlock: new(sync.Mutex), + handlers: handlers, + defaultProduces: api.DefaultProduces, + defaultConsumes: api.DefaultConsumes, + } +} + +func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { + r.hlock.Lock() + paths, ok := r.handlers[strings.ToUpper(method)] + if !ok { + r.hlock.Unlock() + return nil, false + } + handler, ok := paths[path] + r.hlock.Unlock() + return handler, ok +} +func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { + return r.api.ServeError +} +func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + return r.api.ConsumersFor(mediaTypes) +} +func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + return r.api.ProducersFor(mediaTypes) +} +func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + return r.api.AuthenticatorsFor(schemes) +} +func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { + return r.api.Authorizer() +} +func (r *routableUntypedAPI) Formats() strfmt.Registry { + return r.api.Formats() +} + +func (r *routableUntypedAPI) DefaultProduces() string { + return r.defaultProduces +} + +func (r *routableUntypedAPI) DefaultConsumes() string { + return r.defaultConsumes +} + +// NewRoutableContext creates a new context for a routable API +func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} + return ctx +} + +// NewContext creates a new context wrapper +func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { + var an *analysis.Spec + if spec != nil { + an = analysis.New(spec.Spec()) + } + ctx := &Context{spec: spec, analyzer: an} + ctx.api = newRoutableUntypedAPI(spec, api, ctx) + ctx.router = routes + return ctx +} + +// Serve serves the specified spec with the specified api registrations as a http.Handler +func Serve(spec *loads.Document, api *untyped.API) http.Handler { + return ServeWithBuilder(spec, api, PassthroughBuilder) +} + +// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated +// by the Builder +func 
ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { + context := NewContext(spec, api, nil) + return context.APIHandler(builder) +} + +type contextKey int8 + +const ( + _ contextKey = iota + ctxContentType + ctxResponseFormat + ctxMatchedRoute + ctxBoundParams + ctxSecurityPrincipal + ctxSecurityScopes +) + +// MatchedRouteFrom request context value. +func MatchedRouteFrom(req *http.Request) *MatchedRoute { + mr := req.Context().Value(ctxMatchedRoute) + if mr == nil { + return nil + } + if res, ok := mr.(*MatchedRoute); ok { + return res + } + return nil +} + +// SecurityPrincipalFrom request context value. +func SecurityPrincipalFrom(req *http.Request) interface{} { + return req.Context().Value(ctxSecurityPrincipal) +} + +// SecurityScopesFrom request context value. +func SecurityScopesFrom(req *http.Request) []string { + rs := req.Context().Value(ctxSecurityScopes) + if res, ok := rs.([]string); ok { + return res + } + return nil +} + +type contentTypeValue struct { + MediaType string + Charset string +} + +// BasePath returns the base path for this API +func (c *Context) BasePath() string { + return c.spec.BasePath() +} + +// RequiredProduces returns the accepted content types for responses +func (c *Context) RequiredProduces() []string { + return c.analyzer.RequiredProduces() +} + +// BindValidRequest binds a params object to a request but only when the request is valid +// if the request is not valid an error will be returned +func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { + var res []error + + requestContentType := "*/*" + // check and validate content type, select consumer + if runtime.HasBody(request) { + ct, _, err := runtime.ContentType(request.Header) + if err != nil { + res = append(res, err) + } else { + if err := validateContentType(route.Consumes, ct); err != nil { + res = append(res, err) + } + if len(res) == 0 { + cons, ok := route.Consumers[ct] + if !ok { + res = append(res, errors.New(500, "no consumer registered for %s", ct)) + } else { + route.Consumer = cons + requestContentType = ct + } + } + } + } + + // check and validate the response format + if len(res) == 0 && runtime.HasBody(request) { + if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" { + res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces)) + } + } + + // now bind the request with the provided binder + // it's assumed the binder will also validate the request and return an error if the + // request is invalid + if binder != nil && len(res) == 0 { + if err := binder.BindRequest(request, route); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContentType gets the parsed value of a content type +// Returns the media type, its charset and a shallow copy of the request +// when its context doesn't contain the content type value, otherwise it returns +// the same request +// Returns any error that runtime.ContentType may return.
+func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { + return v.MediaType, v.Charset, request, nil + } + + mt, cs, err := runtime.ContentType(request.Header) + if err != nil { + return "", "", nil, err + } + rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) + return mt, cs, request.WithContext(rCtx), nil +} + +// LookupRoute looks a route up and returns true when it is found +func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { + if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { + return route, ok + } + return nil, false +} + +// RouteInfo tries to match a route for this request +// Returns the matched route, a shallow copy of the request if its context +// doesn't already contain the matched route (otherwise the same request), and a bool +// indicating whether the request matches one of the routes; if it doesn't, +// it returns false and nil for the other two return values +func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { + return v, request, ok + } + + if route, ok := c.LookupRoute(request); ok { + rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) + return route, request.WithContext(rCtx), ok + } + + return nil, nil, false +} + +// ResponseFormat negotiates the response content type +// Returns the response format and a shallow copy of the request if its context +// doesn't contain the response format, otherwise the same request +func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { + var rCtx = r.Context() + + if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { + debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) + return v, r + } + + format := NegotiateContentType(r, offers, "") + if format != "" { + debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) + r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) + } + debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) + return format, r +} + +// AllowedMethods gets the allowed methods for the path of this request +func (c *Context) AllowedMethods(request *http.Request) []string { + return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) +} + +// ResetAuth removes the current principal from the request context +func (c *Context) ResetAuth(request *http.Request) *http.Request { + rctx := request.Context() + rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) + rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil) + return request.WithContext(rctx) +} + +// Authorize authorizes the request +// Returns the principal object and a shallow copy of the request when its +// context doesn't contain the principal, otherwise the same request. It returns +// an error if one of the authenticators returns one, or an Unauthenticated error +// when the credentials are rejected +func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) { + if route == nil || !route.HasAuth() { + return nil, nil, nil + } + + var rCtx = request.Context() + if v := rCtx.Value(ctxSecurityPrincipal); v != nil { + return v, request, nil + } + + applies, usr, err := route.Authenticators.Authenticate(request, route) + if !applies || err !=
nil || !route.Authenticators.AllowsAnonymous() && usr == nil { + if err != nil { + return nil, nil, err + } + return nil, nil, errors.Unauthenticated("invalid credentials") + } + if route.Authorizer != nil { + if err := route.Authorizer.Authorize(request, usr); err != nil { + return nil, nil, errors.New(http.StatusForbidden, err.Error()) + } + } + + rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr) + rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes()) + return usr, request.WithContext(rCtx), nil +} + +// BindAndValidate binds and validates the request +// Returns the validation map and a shallow copy of the request when its context +// doesn't contain the validation, otherwise it returns the same request or a +// CompositeValidationError error +func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) { + var rCtx = request.Context() + + if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok { + debugLog("got cached validation (valid: %t)", len(v.result) == 0) + if len(v.result) > 0 { + return v.bound, request, errors.CompositeValidationError(v.result...) + } + return v.bound, request, nil + } + result := validateRequest(c, request, matched) + rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result) + request = request.WithContext(rCtx) + if len(result.result) > 0 { + return result.bound, request, errors.CompositeValidationError(result.result...) + } + debugLog("no validation errors found") + return result.bound, request, nil +} + +// NotFound the default not found responder for when no route has been matched yet +func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { + c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) +} + +// Respond renders the response after doing some content negotiation +func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) { + debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) + offers := []string{} + for _, mt := range produces { + if mt != c.api.DefaultProduces() { + offers = append(offers, mt) + } + } + // the default producer is last so more specific producers take precedence + offers = append(offers, c.api.DefaultProduces()) + debugLog("offers: %v", offers) + + var format string + format, r = c.ResponseFormat(r, offers) + rw.Header().Set(runtime.HeaderContentType, format) + + if resp, ok := data.(Responder); ok { + producers := route.Producers + prod, ok := producers[format] + if !ok { + prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) + pr, ok := prods[c.api.DefaultProduces()] + if !ok { + panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + prod = pr + } + resp.WriteResponse(rw, prod) + return + } + + if err, ok := data.(error); ok { + if format == "" { + rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime) + } + + if realm := security.FailedBasicAuth(r); realm != "" { + rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm)) + } + + if route == nil || route.Operation == nil { + c.api.ServeErrorFor("")(rw, r, err) + return + } + c.api.ServeErrorFor(route.Operation.ID)(rw, r, err) + return + } + + if route == nil || route.Operation == nil { + rw.WriteHeader(200) + if r.Method == "HEAD" { + return + } + producers := c.api.ProducersFor(normalizeOffers(offers)) + prod, ok := producers[format] + if !ok { +
panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + if err := prod.Produce(rw, data); err != nil { + panic(err) // let the recovery middleware deal with this + } + return + } + + if _, code, ok := route.Operation.SuccessResponse(); ok { + rw.WriteHeader(code) + if code == 204 || r.Method == "HEAD" { + return + } + + producers := route.Producers + prod, ok := producers[format] + if !ok { + prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) + pr, ok := prods[c.api.DefaultProduces()] + if !ok { + panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) + } + prod = pr + } + if err := prod.Produce(rw, data); err != nil { + panic(err) // let the recovery middleware deal with this + } + return + } + + c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response")) +} + +// APIHandler returns a handler to serve the API; this includes the swagger spec, the router and the contract defined in the swagger spec +func (c *Context) APIHandler(builder Builder) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + + var title string + sp := c.spec.Spec() + if sp != nil && sp.Info != nil && sp.Info.Title != "" { + title = sp.Info.Title + } + + redocOpts := RedocOpts{ + BasePath: c.BasePath(), + Title: title, + } + + return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b))) +} + +// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec +func (c *Context) RoutesHandler(builder Builder) http.Handler { + b := builder + if b == nil { + b = PassthroughBuilder + } + return NewRouter(c, b(NewOperationExecutor(c))) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE new file mode 100644 index 000000000000..e65039ad84ca --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2014 Naoya Inada + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.
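The pieces vendored above (JSONConsumer/JSONProducer, OperationHandlerFunc, the untyped API, and middleware.Serve) compose into a runnable server. A minimal sketch of that wiring, assuming a hypothetical swagger.json on disk that declares a GET /hello operation; neither the file nor the route is part of this change:

```go
package main

import (
	"log"
	"net/http"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	// Load a swagger document; "./swagger.json" is a placeholder path.
	doc, err := loads.Spec("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	api := untyped.NewAPI(doc)
	api.RegisterConsumer("application/json", runtime.JSONConsumer())
	api.RegisterProducer("application/json", runtime.JSONProducer())

	// The method/path pair must exist in the loaded spec for the router
	// to match it; GET /hello is assumed here purely for illustration.
	api.RegisterOperation("get", "/hello", runtime.OperationHandlerFunc(
		func(params interface{}) (interface{}, error) {
			return map[string]string{"message": "hello"}, nil
		}))

	// middleware.Serve builds the Context, router, spec and docs handlers.
	log.Fatal(http.ListenAndServe(":8080", middleware.Serve(doc, api)))
}
```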
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md new file mode 100644 index 000000000000..30109e17d5ed --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md @@ -0,0 +1,180 @@ +# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco) + +The fast and flexible HTTP request router for [Go](http://golang.org). + +Denco is based on the Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter). +However, Denco is optimized and adds some features. + +## Features + +* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark)) +* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`) +* Small (but sufficient) URL router API +* HTTP request multiplexer like `http.ServeMux` + +## Installation + + go get -u github.com/go-openapi/runtime/middleware/denco + +## Using as HTTP request multiplexer + +```go +package main + +import ( + "fmt" + "log" + "net/http" + + "github.com/go-openapi/runtime/middleware/denco" +) + +func Index(w http.ResponseWriter, r *http.Request, params denco.Params) { + fmt.Fprintf(w, "Welcome to Denco!\n") +} + +func User(w http.ResponseWriter, r *http.Request, params denco.Params) { + fmt.Fprintf(w, "Hello %s!\n", params.Get("name")) +} + +func main() { + mux := denco.NewMux() + handler, err := mux.Build([]denco.Handler{ + mux.GET("/", Index), + mux.GET("/user/:name", User), + mux.POST("/user/:name", User), + }) + if err != nil { + panic(err) + } + log.Fatal(http.ListenAndServe(":8080", handler)) +} +``` + +## Using as URL router + +```go +package main + +import ( + "fmt" + + "github.com/go-openapi/runtime/middleware/denco" +) + +type route struct { + name string +} + +func main() { + router := denco.New() + router.Build([]denco.Record{ + {"/", &route{"root"}}, + {"/user/:id", &route{"user"}}, + {"/user/:name/:id", &route{"username"}}, + {"/static/*filepath", &route{"static"}}, + }) + + data, params, found := router.Lookup("/") + // print `&main.route{name:"root"}, denco.Params(nil), true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) + + data, params, found = router.Lookup("/user/hoge") + // print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) + + data, params, found = router.Lookup("/user/hoge/7") + // print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) + + data, params, found = router.Lookup("/static/path/to/file") + // print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`. + fmt.Printf("%#v, %#v, %#v\n", data, params, found) +} +``` + +See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details. + +## Getting the value of a path parameter + +You can get the value of a path parameter in two ways: + +1. Using the [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method +2. Finding it with a loop + +```go +package main + +import ( + "fmt" + + "github.com/go-openapi/runtime/middleware/denco" +) + +func main() { + router := denco.New() + if err := router.Build([]denco.Record{ + {"/user/:name/:id", "route1"}, + }); err != nil { + panic(err) + } + + // 1. Using denco.Params.Get method.
+ _, params, _ := router.Lookup("/user/alice/1") + name := params.Get("name") + if name != "" { + fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". + } + + // 2. Finding it with a loop. + for _, param := range params { + if param.Name == "name" { + fmt.Printf("Hello %s.\n", param.Value) // prints "Hello alice.". + } + } +} +``` + +## URL patterns + +Denco's route matching strategy is "most nearly matching". + +When the routes `/:name` and `/alice` have been built, the URI `/alice` matches the route `/alice`, not `/:name`, +because the URI `/alice` matches the route `/alice` more closely than `/:name`. + +As a larger example, when the routes below have been built: + +``` +/user/alice +/user/:name +/user/:name/:id +/user/alice/:id +/user/:id/bob +``` + +The routes match as follows: + +``` +/user/alice => "/user/alice" (no match with "/user/:name") +/user/bob => "/user/:name" +/user/naoina/1 => "/user/:name/1" +/user/alice/1 => "/user/alice/:id" (no match with "/user/:name/:id") +/user/1/bob => "/user/:id/bob" (no match with "/user/:name/:id") +/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob") +``` + +## Limitations + +Denco has the following limitations: + +* The number of param records (such as `/:name`) must be less than 2^22 +* The number of elements of the internal slice must be less than 2^22 + +## Benchmarks + + cd $GOPATH/src/github.com/go-openapi/runtime/middleware/denco + go test -bench . -benchmem + +## License + +Denco is licensed under the MIT License. diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go new file mode 100644 index 000000000000..73703fddec38 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go @@ -0,0 +1,452 @@ +// Package denco provides a fast URL router. +package denco + +import ( + "fmt" + "sort" + "strings" +) + +const ( + // ParamCharacter is a special character for a path parameter. + ParamCharacter = ':' + + // WildcardCharacter is a special character for a wildcard path parameter. + WildcardCharacter = '*' + + // TerminationCharacter is a special character for the end of a path. + TerminationCharacter = '#' + + // MaxSize is the max size of records and the internal slice. + MaxSize = (1 << 22) - 1 +) + +// Router represents a URL router. +type Router struct { + // SizeHint is the expected maximum number of path parameters in the records passed to Build. + // SizeHint is used to determine the capacity of the memory to allocate. + // By default, SizeHint is determined from the records given to Build. + SizeHint int + + static map[string]interface{} + param *doubleArray +} + +// New returns a new Router. +func New() *Router { + return &Router{ + SizeHint: -1, + static: make(map[string]interface{}), + param: newDoubleArray(), + } +} + +// Lookup returns the data and path parameters that are associated with path. +// params is a slice of Param arranged in the order in which the parameters appeared. +// e.g. when the built routing path is "/path/to/:id/:name" and the given path is "/path/to/1/alice", the params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
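+// +// For illustration only (the key and value below are hypothetical, not from the original source): +// +// router := denco.New() +// _ = router.Build([]denco.Record{{Key: "/path/to/:id/:name", Value: "data"}}) +// _, params, _ := router.Lookup("/path/to/1/alice") +// // params is Params{{Name: "id", Value: "1"}, {Name: "name", Value: "alice"}}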
+func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) { + if data, found := rt.static[path]; found { + return data, nil, true + } + if len(rt.param.node) == 1 { + return nil, nil, false + } + nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1) + if !found { + return nil, nil, false + } + for i := 0; i < len(params); i++ { + params[i].Name = nd.paramNames[i] + } + return nd.data, params, true +} + +// Build builds the URL router from records. +func (rt *Router) Build(records []Record) error { + statics, params := makeRecords(records) + if len(params) > MaxSize { + return fmt.Errorf("denco: too many records") + } + if rt.SizeHint < 0 { + rt.SizeHint = 0 + for _, p := range params { + size := 0 + for _, k := range p.Key { + if k == ParamCharacter || k == WildcardCharacter { + size++ + } + } + if size > rt.SizeHint { + rt.SizeHint = size + } + } + } + for _, r := range statics { + rt.static[r.Key] = r.Value + } + if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil { + return err + } + return nil +} + +// Param represents the name and value of a path parameter. +type Param struct { + Name string + Value string +} + +// Params represents the name and value of path parameters. +type Params []Param + +// Get gets the first value associated with the given name. +// If there are no values associated with the key, Get returns "". +func (ps Params) Get(name string) string { + for _, p := range ps { + if p.Name == name { + return p.Value + } + } + return "" +} + +type doubleArray struct { + bc []baseCheck + node []*node +} + +func newDoubleArray() *doubleArray { + return &doubleArray{ + bc: []baseCheck{0}, + node: []*node{nil}, // The start index is adjusted to 1 because 0 is used as a mark of a non-existent node. + } +} + +// baseCheck contains BASE, CHECK and Extra flags. +// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
+// +// BASE (22bit) | Extra flags (2bit) | CHECK (8bit) +// |----------------------|--|--------| +// 32 10 8 0 +type baseCheck uint32 + +func (bc baseCheck) Base() int { + return int(bc >> 10) +} + +func (bc *baseCheck) SetBase(base int) { + *bc |= baseCheck(base) << 10 +} + +func (bc baseCheck) Check() byte { + return byte(bc) +} + +func (bc *baseCheck) SetCheck(check byte) { + *bc |= baseCheck(check) +} + +func (bc baseCheck) IsEmpty() bool { + return bc&0xfffffcff == 0 +} + +func (bc baseCheck) IsSingleParam() bool { + return bc&paramTypeSingle == paramTypeSingle +} + +func (bc baseCheck) IsWildcardParam() bool { + return bc&paramTypeWildcard == paramTypeWildcard +} + +func (bc baseCheck) IsAnyParam() bool { + return bc&paramTypeAny != 0 +} + +func (bc *baseCheck) SetSingleParam() { + *bc |= (1 << 8) +} + +func (bc *baseCheck) SetWildcardParam() { + *bc |= (1 << 9) +} + +const ( + paramTypeSingle = 0x0100 + paramTypeWildcard = 0x0200 + paramTypeAny = 0x0300 +) + +func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) { + indices := make([]uint64, 0, 1) + for i := 0; i < len(path); i++ { + if da.bc[idx].IsAnyParam() { + indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff)) + } + c := path[i] + if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c { + goto BACKTRACKING + } + } + if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter { + return da.node[da.bc[next].Base()], params, true + } +BACKTRACKING: + for j := len(indices) - 1; j >= 0; j-- { + i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff) + if da.bc[idx].IsSingleParam() { + idx := nextIndex(da.bc[idx].Base(), ParamCharacter) + if idx >= len(da.bc) { + break + } + next := NextSeparator(path, i) + params := append(params, Param{Value: path[i:next]}) + if nd, params, found := da.lookup(path[next:], params, idx); found { + return nd, params, true + } + } + if da.bc[idx].IsWildcardParam() { + idx := nextIndex(da.bc[idx].Base(), WildcardCharacter) + params := append(params, Param{Value: path[i:]}) + return da.node[da.bc[idx].Base()], params, true + } + } + return nil, nil, false +} + +// build builds double-array from records.
+func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error {
+	sort.Stable(recordSlice(srcs))
+	base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase)
+	if err != nil {
+		return err
+	}
+	if leaf != nil {
+		nd, err := makeNode(leaf)
+		if err != nil {
+			return err
+		}
+		da.bc[idx].SetBase(len(da.node))
+		da.node = append(da.node, nd)
+	}
+	for _, sib := range siblings {
+		da.setCheck(nextIndex(base, sib.c), sib.c)
+	}
+	for _, sib := range siblings {
+		records := srcs[sib.start:sib.end]
+		switch sib.c {
+		case ParamCharacter:
+			for _, r := range records {
+				next := NextSeparator(r.Key, depth+1)
+				name := r.Key[depth+1 : next]
+				r.paramNames = append(r.paramNames, name)
+				r.Key = r.Key[next:]
+			}
+			da.bc[idx].SetSingleParam()
+			if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
+				return err
+			}
+		case WildcardCharacter:
+			r := records[0]
+			name := r.Key[depth+1 : len(r.Key)-1]
+			r.paramNames = append(r.paramNames, name)
+			r.Key = ""
+			da.bc[idx].SetWildcardParam()
+			if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
+				return err
+			}
+		default:
+			if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// setBase sets BASE.
+func (da *doubleArray) setBase(i, base int) {
+	da.bc[i].SetBase(base)
+}
+
+// setCheck sets CHECK.
+func (da *doubleArray) setCheck(i int, check byte) {
+	da.bc[i].SetCheck(check)
+}
+
+// findEmptyIndex returns an index of an unused BASE/CHECK node.
+func (da *doubleArray) findEmptyIndex(start int) int {
+	i := start
+	for ; i < len(da.bc); i++ {
+		if da.bc[i].IsEmpty() {
+			break
+		}
+	}
+	return i
+}
+
+// findBase returns a good BASE.
+func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) {
+	for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) {
+		base = nextIndex(idx, firstChar)
+		if _, used := usedBase[base]; used {
+			continue
+		}
+		i := 0
+		for ; i < len(siblings); i++ {
+			next := nextIndex(base, siblings[i].c)
+			if len(da.bc) <= next {
+				da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...)
+			}
+			if !da.bc[next].IsEmpty() {
+				break
+			}
+		}
+		if i == len(siblings) {
+			break
+		}
+	}
+	usedBase[base] = struct{}{}
+	return base
+}
+
+func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) {
+	siblings, leaf, err = makeSiblings(records, depth)
+	if err != nil {
+		return -1, nil, nil, err
+	}
+	if len(siblings) < 1 {
+		return -1, nil, leaf, nil
+	}
+	base = da.findBase(siblings, idx, usedBase)
+	if base > MaxSize {
+		return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice")
+	}
+	da.setBase(idx, base)
+	return base, siblings, leaf, err
+}
+
+// node represents a node of the Double-Array.
+type node struct {
+	data interface{}
+
+	// Names of path parameters.
+	paramNames []string
+}
+
+// makeNode returns a new node from a record.
+func makeNode(r *record) (*node, error) {
+	dups := make(map[string]bool)
+	for _, name := range r.paramNames {
+		if dups[name] {
+			return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key)
+		}
+		dups[name] = true
+	}
+	return &node{data: r.Value, paramNames: r.paramNames}, nil
+}
+
+// sibling represents intermediate data used to build the Double-Array.
+type sibling struct {
+	// The start index of duplicated characters.
+	start int
+
+	// The end index of duplicated characters.
+	end int
+
+	// The character of the sibling.
+	c byte
+}
+
+// nextIndex returns the next index of the BASE/CHECK array.
+func nextIndex(base int, c byte) int {
+	return base ^ int(c)
+}
+
+// makeSiblings returns a slice of siblings.
+func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) {
+	var (
+		pc byte
+		n  int
+	)
+	for i, r := range records {
+		if len(r.Key) <= depth {
+			leaf = r
+			continue
+		}
+		c := r.Key[depth]
+		switch {
+		case pc < c:
+			sib = append(sib, sibling{start: i, c: c})
+		case pc == c:
+			continue
+		default:
+			return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted")
+		}
+		if n > 0 {
+			sib[n-1].end = i
+		}
+		pc = c
+		n++
+	}
+	if n == 0 {
+		return nil, leaf, nil
+	}
+	sib[n-1].end = len(records)
+	return sib, leaf, nil
+}
+
+// Record represents record data for router construction.
+type Record struct {
+	// Key for router construction.
+	Key string
+
+	// Result value for Key.
+	Value interface{}
+}
+
+// NewRecord returns a new Record.
+func NewRecord(key string, value interface{}) Record {
+	return Record{
+		Key:   key,
+		Value: value,
+	}
+}
+
+// record represents a record used to build the Double-Array.
+type record struct {
+	Record
+	paramNames []string
+}
+
+// makeRecords returns the records used to build the Double-Arrays.
+func makeRecords(srcs []Record) (statics, params []*record) {
+	spChars := string([]byte{ParamCharacter, WildcardCharacter})
+	termChar := string(TerminationCharacter)
+	for _, r := range srcs {
+		if strings.ContainsAny(r.Key, spChars) {
+			r.Key += termChar
+			params = append(params, &record{Record: r})
+		} else {
+			statics = append(statics, &record{Record: r})
+		}
+	}
+	return statics, params
+}
+
+// recordSlice is a slice of records that implements sort.Interface.
+type recordSlice []*record
+
+// Len implements sort.Interface.Len.
+func (rs recordSlice) Len() int {
+	return len(rs)
+}
+
+// Less implements sort.Interface.Less.
+func (rs recordSlice) Less(i, j int) bool {
+	return rs[i].Key < rs[j].Key
+}
+
+// Swap implements sort.Interface.Swap.
+func (rs recordSlice) Swap(i, j int) {
+	rs[i], rs[j] = rs[j], rs[i]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
new file mode 100644
index 000000000000..0886713c181d
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go
@@ -0,0 +1,106 @@
+package denco
+
+import (
+	"net/http"
+)
+
+// Mux represents a multiplexer for HTTP requests.
+type Mux struct{}
+
+// NewMux returns a new Mux.
+func NewMux() *Mux {
+	return &Mux{}
+}
+
+// GET is shorthand for Mux.Handler("GET", path, handler).
+func (m *Mux) GET(path string, handler HandlerFunc) Handler {
+	return m.Handler("GET", path, handler)
+}
+
+// POST is shorthand for Mux.Handler("POST", path, handler).
+func (m *Mux) POST(path string, handler HandlerFunc) Handler {
+	return m.Handler("POST", path, handler)
+}
+
+// PUT is shorthand for Mux.Handler("PUT", path, handler).
+func (m *Mux) PUT(path string, handler HandlerFunc) Handler {
+	return m.Handler("PUT", path, handler)
+}
+
+// HEAD is shorthand for Mux.Handler("HEAD", path, handler).
+func (m *Mux) HEAD(path string, handler HandlerFunc) Handler {
+	return m.Handler("HEAD", path, handler)
+}
+
+// Handler returns a handler for the given HTTP method.
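+// An illustrative use of the Mux (editor's sketch, not part of the vendored
+// source; showUser stands for any denco.HandlerFunc):
+//
+//	mux := denco.NewMux()
+//	handler, err := mux.Build([]denco.Handler{
+//		mux.GET("/users/:id", showUser),
+//	})
+//	if err == nil {
+//		_ = http.ListenAndServe(":8080", handler)
+//	}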
+func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler {
+	return Handler{
+		Method: method,
+		Path:   path,
+		Func:   handler,
+	}
+}
+
+// Build builds an http.Handler.
+func (m *Mux) Build(handlers []Handler) (http.Handler, error) {
+	recordMap := make(map[string][]Record)
+	for _, h := range handlers {
+		recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func))
+	}
+	mux := newServeMux()
+	for m, records := range recordMap {
+		router := New()
+		if err := router.Build(records); err != nil {
+			return nil, err
+		}
+		mux.routers[m] = router
+	}
+	return mux, nil
+}
+
+// Handler represents a handler of an HTTP request.
+type Handler struct {
+	// Method is an HTTP method.
+	Method string
+
+	// Path is the routing path for the handler.
+	Path string
+
+	// Func is the handler function for the HTTP request.
+	Func HandlerFunc
+}
+
+// HandlerFunc is the type of a handler function.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params)
+
+type serveMux struct {
+	routers map[string]*Router
+}
+
+func newServeMux() *serveMux {
+	return &serveMux{
+		routers: make(map[string]*Router),
+	}
+}
+
+// ServeHTTP implements the http.Handler interface.
+func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	handler, params := mux.handler(r.Method, r.URL.Path)
+	handler(w, r, params)
+}
+
+func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) {
+	if router, found := mux.routers[method]; found {
+		if handler, params, found := router.Lookup(path); found {
+			return handler.(HandlerFunc), params
+		}
+	}
+	return NotFound, nil
+}
+
+// NotFound replies to the request with an HTTP 404 not found error.
+// NotFound is called when an unknown HTTP method is requested or a handler is not found.
+// To use your own NotFound handler, overwrite this variable.
+var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) {
+	http.NotFound(w, r)
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
new file mode 100644
index 000000000000..edc1f6ab80a9
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go
@@ -0,0 +1,12 @@
+package denco
+
+// NextSeparator returns the index of the next separator in path.
+func NextSeparator(path string, start int) int {
+	for start < len(path) {
+		if c := path[start]; c == '/' || c == TerminationCharacter {
+			break
+		}
+		start++
+	}
+	return start
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go
new file mode 100644
index 000000000000..eaf90606ac32
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*Package middleware provides the library with helper functions for serving swagger APIs.
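+
+The handlers in this package (spec serving, docs, routing, security and
+parameter binding/validation) are designed to be composed into a single
+chain; the pseudo middleware below sketches that pipeline end to end.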
+
+Pseudo middleware handler
+
+	import (
+		"net/http"
+
+		"github.com/go-openapi/errors"
+	)
+
+	func newCompleteMiddleware(ctx *Context) http.Handler {
+		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+			// use context to lookup routes
+			if matched, ok := ctx.RouteInfo(r); ok {
+
+				if matched.NeedsAuth() {
+					if _, err := ctx.Authorize(r, matched); err != nil {
+						ctx.Respond(rw, r, matched.Produces, matched, err)
+						return
+					}
+				}
+
+				bound, validation := ctx.BindAndValidate(r, matched)
+				if validation != nil {
+					ctx.Respond(rw, r, matched.Produces, matched, validation)
+					return
+				}
+
+				result, err := matched.Handler.Handle(bound)
+				if err != nil {
+					ctx.Respond(rw, r, matched.Produces, matched, err)
+					return
+				}
+
+				ctx.Respond(rw, r, matched.Produces, matched, result)
+				return
+			}
+
+			// Not found, check if it exists in the other methods first
+			if others := ctx.AllowedMethods(r); len(others) > 0 {
+				ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+				return
+			}
+			ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
+		})
+	}
+*/
+package middleware
diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/vendor/github.com/go-openapi/runtime/middleware/go18.go
new file mode 100644
index 000000000000..75c762c09486
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package middleware
+
+import "net/url"
+
+func pathUnescape(path string) (string, error) {
+	return url.PathUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
new file mode 100644
index 000000000000..3e342258bca6
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/header/header.go
@@ -0,0 +1,326 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// Copy returns a shallow copy of the header.
+func Copy(header http.Header) http.Header {
+	h := make(http.Header)
+	for k, vs := range header {
+		h[k] = vs
+	}
+	return h
+}
+
+var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
+
+// ParseTime parses the header as time.
The zero value is returned if the +// header is not present or there is an error parsing the +// header. +func ParseTime(header http.Header, key string) time.Time { + if s := header.Get(key); s != "" { + for _, layout := range timeLayouts { + if t, err := time.Parse(layout, s); err == nil { + return t.UTC() + } + } + } + return time.Time{} +} + +// ParseList parses a comma separated list of values. Commas are ignored in +// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is +// trimmed. +func ParseList(header http.Header, key string) []string { + var result []string + for _, s := range header[http.CanonicalHeaderKey(key)] { + begin := 0 + end := 0 + escape := false + quote := false + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + end = i + 1 + case quote: + switch b { + case '\\': + escape = true + case '"': + quote = false + } + end = i + 1 + case b == '"': + quote = true + end = i + 1 + case octetTypes[b]&isSpace != 0: + if begin == end { + begin = i + 1 + end = begin + } + case b == ',': + if begin < end { + result = append(result, s[begin:end]) + } + begin = i + 1 + end = begin + default: + end = i + 1 + } + } + if begin < end { + result = append(result, s[begin:end]) + } + } + return result +} + +// ParseValueAndParams parses a comma separated list of values with optional +// semicolon separated name-value pairs. Content-Type and Content-Disposition +// headers are in this format. +func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { + return parseValueAndParams(header.Get(key)) +} + +func parseValueAndParams(s string) (value string, params map[string]string) { + params = make(map[string]string) + value, s = expectTokenSlash(s) + if value == "" { + return + } + value = strings.ToLower(value) + s = skipSpace(s) + for strings.HasPrefix(s, ";") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +// AcceptSpec ... +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept2 ... +func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { + for _, en := range ParseList(header, key) { + v, p := parseValueAndParams(en) + var spec AcceptSpec + spec.Value = v + spec.Q = 1.0 + if p != nil { + if q, ok := p["q"]; ok { + spec.Q, _ = expectQuality(q) + } + } + if spec.Q < 0.0 { + continue + } + specs = append(specs, spec) + } + + return +} + +// ParseAccept parses Accept* headers. 
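+// For example, given a header of
+//
+//	Accept-Encoding: gzip;q=0.8, identity
+//
+// the call (editor's illustrative sketch)
+//
+//	specs := ParseAccept(r.Header, "Accept-Encoding")
+//
+// yields the AcceptSpecs {Value: "gzip", Q: 0.8} and {Value: "identity", Q: 1.0}.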
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+	for _, s := range header[key] {
+		for {
+			var spec AcceptSpec
+			spec.Value, s = expectTokenSlash(s)
+			if spec.Value == "" {
+				continue loop
+			}
+			spec.Q = 1.0
+			s = skipSpace(s)
+			if strings.HasPrefix(s, ";") {
+				s = skipSpace(s[1:])
+				for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") {
+					s = skipSpace(s[1:])
+				}
+				if strings.HasPrefix(s, "q=") {
+					spec.Q, s = expectQuality(s[2:])
+					if spec.Q < 0.0 {
+						continue loop
+					}
+				}
+			}
+			specs = append(specs, spec)
+			s = skipSpace(s)
+			if !strings.HasPrefix(s, ",") {
+				continue loop
+			}
+			s = skipSpace(s[1:])
+		}
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		b := s[i]
+		if (octetTypes[b]&isToken == 0) && b != '/' {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+	switch {
+	case len(s) == 0:
+		return -1, ""
+	case s[0] == '0':
+		q = 0
+	case s[0] == '1':
+		q = 1
+	default:
+		return -1, ""
+	}
+	s = s[1:]
+	if !strings.HasPrefix(s, ".") {
+		return q, s
+	}
+	s = s[1:]
+	i := 0
+	n := 0
+	d := 1
+	for ; i < len(s); i++ {
+		b := s[i]
+		if b < '0' || b > '9' {
+			break
+		}
+		n = n*10 + int(b) - '0'
+		d *= 10
+	}
+	return q + float64(n)/float64(d), s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go
new file mode 100644
index 000000000000..a9b6f27d3d37
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// this file was taken from the github.com/golang/gddo repository
+
+package middleware
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/go-openapi/runtime/middleware/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
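+// For instance (editor's illustrative note), with
+// "Accept-Encoding: gzip;q=1.0, identity;q=0.5" and offers
+// []string{"identity", "gzip"}, "gzip" wins.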
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+	bestOffer := "identity"
+	bestQ := -1.0
+	specs := header.ParseAccept(r.Header, "Accept-Encoding")
+	for _, offer := range offers {
+		for _, spec := range specs {
+			if spec.Q > bestQ &&
+				(spec.Value == "*" || spec.Value == offer) {
+				bestQ = spec.Q
+				bestOffer = offer
+			}
+		}
+	}
+	if bestQ == 0 {
+		bestOffer = ""
+	}
+	return bestOffer
+}
+
+// NegotiateContentType returns the best offered content type for the request's
+// Accept header. If two offers match with equal weight, then the more specific
+// offer is preferred. For example, text/* trumps */*. If two offers match
+// with equal weight and specificity, then the offer earlier in the list is
+// preferred. If no offers match, then defaultOffer is returned.
+func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string {
+	bestOffer := defaultOffer
+	bestQ := -1.0
+	bestWild := 3
+	specs := header.ParseAccept(r.Header, "Accept")
+	for _, rawOffer := range offers {
+		offer := normalizeOffer(rawOffer)
+		// No Accept header: just return the first offer.
+		if len(specs) == 0 {
+			return rawOffer
+		}
+		for _, spec := range specs {
+			switch {
+			case spec.Q == 0.0:
+				// ignore
+			case spec.Q < bestQ:
+				// we already have a better match; skip
+			case spec.Value == "*/*":
+				if spec.Q > bestQ || bestWild > 2 {
+					bestQ = spec.Q
+					bestWild = 2
+					bestOffer = rawOffer
+				}
+			case strings.HasSuffix(spec.Value, "/*"):
+				if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) &&
+					(spec.Q > bestQ || bestWild > 1) {
+					bestQ = spec.Q
+					bestWild = 1
+					bestOffer = rawOffer
+				}
+			default:
+				if spec.Value == offer &&
+					(spec.Q > bestQ || bestWild > 0) {
+					bestQ = spec.Q
+					bestWild = 0
+					bestOffer = rawOffer
+				}
+			}
+		}
+	}
+	return bestOffer
+}
+
+func normalizeOffers(orig []string) (norm []string) {
+	for _, o := range orig {
+		norm = append(norm, normalizeOffer(o))
+	}
+	return
+}
+
+func normalizeOffer(orig string) string {
+	return strings.SplitN(orig, ";", 2)[0]
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
new file mode 100644
index 000000000000..466f553db48d
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go
@@ -0,0 +1,48 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package middleware + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +type errorResp struct { + code int + response interface{} + headers http.Header +} + +func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + for k, v := range e.headers { + for _, val := range v { + rw.Header().Add(k, val) + } + } + if e.code > 0 { + rw.WriteHeader(e.code) + } else { + rw.WriteHeader(http.StatusInternalServerError) + } + if err := producer.Produce(rw, e.response); err != nil { + panic(err) + } +} + +// NotImplemented the error response when the response is not implemented +func NotImplemented(message string) Responder { + return &errorResp{http.StatusNotImplemented, message, make(http.Header)} +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go new file mode 100644 index 000000000000..1175a63cf29b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/operation.go @@ -0,0 +1,30 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import "net/http" + +// NewOperationExecutor creates a context aware middleware that handles the operations after routing +func NewOperationExecutor(ctx *Context) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + // use context to lookup routes + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + + route.Handler.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go new file mode 100644 index 000000000000..8975b6e1c891 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/parameter.go @@ -0,0 +1,480 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package middleware
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+const defaultMaxMemory = 32 << 20
+
+var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
+	binder := new(untypedParamBinder)
+	binder.Name = param.Name
+	binder.parameter = &param
+	binder.formats = formats
+	if param.In != "body" {
+		binder.validator = validate.NewParamValidator(&param, formats)
+	} else {
+		binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats)
+	}
+
+	return binder
+}
+
+type untypedParamBinder struct {
+	parameter *spec.Parameter
+	formats   strfmt.Registry
+	Name      string
+	validator validate.EntityValidator
+}
+
+func (p *untypedParamBinder) Type() reflect.Type {
+	return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items)
+}
+
+func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type {
+	switch tpe {
+	case "boolean":
+		return reflect.TypeOf(true)
+
+	case "string":
+		if tt, ok := p.formats.GetType(format); ok {
+			return tt
+		}
+		return reflect.TypeOf("")
+
+	case "integer":
+		switch format {
+		case "int8":
+			return reflect.TypeOf(int8(0))
+		case "int16":
+			return reflect.TypeOf(int16(0))
+		case "int32":
+			return reflect.TypeOf(int32(0))
+		case "int64":
+			return reflect.TypeOf(int64(0))
+		default:
+			return reflect.TypeOf(int64(0))
+		}
+
+	case "number":
+		switch format {
+		case "float":
+			return reflect.TypeOf(float32(0))
+		case "double":
+			return reflect.TypeOf(float64(0))
+		}
+
+	case "array":
+		if items == nil {
+			return nil
+		}
+		itemsType := p.typeForSchema(items.Type, items.Format, items.Items)
+		if itemsType == nil {
+			return nil
+		}
+		return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type()
+
+	case "file":
+		return reflect.TypeOf(&runtime.File{}).Elem()
+
+	case "object":
+		return reflect.TypeOf(map[string]interface{}{})
+	}
+	return nil
+}
+
+func (p *untypedParamBinder) allowsMulti() bool {
+	return p.parameter.In == "query" || p.parameter.In == "formData"
+}
+
+func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
+	name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
+	if tpe == "array" {
+		if cf == "multi" {
+			if !p.allowsMulti() {
+				return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
+			}
+			vv, hasKey, _ := values.GetOK(name)
+			return vv, false, hasKey, nil
+		}
+
+		v, hk, hv := values.GetOK(name)
+		if !hv {
+			return nil, false, hk, nil
+		}
+		d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target)
+		return d, c, hk, e
+	}
+
+	vv, hk, _ := values.GetOK(name)
+	return vv, false, hk, nil
+}
+
+func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error {
+	// fmt.Println("binding", p.name, "as", p.Type())
+	switch p.parameter.In {
+	case "query":
+		data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target)
+		if err != nil {
+			return err
+		}
+		if custom {
+			return nil
+		}
+
+		return p.bindValue(data, hasKey, target)
+
+	case "header":
+		data, custom, hasKey, err :=
p.readValue(runtime.Values(request.Header), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "path": + data, custom, hasKey, err := p.readValue(routeParams, target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "formData": + var err error + var mt string + + mt, _, e := runtime.ContentType(request.Header) + if e != nil { + // because of the interface conversion go thinks the error is not nil + // so we first check for nil and then set the err var if it's not nil + err = e + } + + if err != nil { + return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { + return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) + } + + if mt == "multipart/form-data" { + if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + } + + if err = request.ParseForm(); err != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", err) + } + + if p.parameter.Type == "file" { + file, header, ffErr := request.FormFile(p.parameter.Name) + if ffErr != nil { + return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) + } + target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) + return nil + } + + if request.MultipartForm != nil { + data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) + if rvErr != nil { + return rvErr + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + } + data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) + if err != nil { + return err + } + if custom { + return nil + } + return p.bindValue(data, hasKey, target) + + case "body": + newValue := reflect.New(target.Type()) + if !runtime.HasBody(request) { + if p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + } + + return nil + } + if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { + if err == io.EOF && p.parameter.Default != nil { + target.Set(reflect.ValueOf(p.parameter.Default)) + return nil + } + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) + } + target.Set(reflect.Indirect(newValue)) + return nil + default: + return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In)) + } +} + +func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { + if p.parameter.Type == "array" { + return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) + } + var d string + if len(data) > 0 { + d = data[len(data)-1] + } + return p.setFieldValue(target, p.parameter.Default, d, hasKey) +} + +func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { + tpe := p.parameter.Type + if p.parameter.Format != "" { + tpe = p.parameter.Format + } + + if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { + return errors.Required(p.Name, p.parameter.In) + } + + ok, err := p.tryUnmarshaler(target, defaultValue, data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, 
data) + } + if ok { + return nil + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if tpe == "byte" { + if data == "" { + if target.CanSet() { + target.SetBytes(defVal.Bytes()) + } + return nil + } + + b, err := base64.StdEncoding.DecodeString(data) + if err != nil { + b, err = base64.URLEncoding.DecodeString(data) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + } + if target.CanSet() { + target.SetBytes(b) + } + return nil + } + + switch target.Kind() { + case reflect.Bool: + if data == "" { + if target.CanSet() { + target.SetBool(defVal.Bool()) + } + return nil + } + b, err := swag.ConvertBool(data) + if err != nil { + return err + } + if target.CanSet() { + target.SetBool(b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(int64(0))) + target.SetInt(rd.Int()) + } + return nil + } + i, err := strconv.ParseInt(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowInt(i) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetInt(i) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(uint64(0))) + target.SetUint(rd.Uint()) + } + return nil + } + u, err := strconv.ParseUint(data, 10, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowUint(u) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetUint(u) + } + + case reflect.Float32, reflect.Float64: + if data == "" { + if target.CanSet() { + rd := defVal.Convert(reflect.TypeOf(float64(0))) + target.SetFloat(rd.Float()) + } + return nil + } + f, err := strconv.ParseFloat(data, 64) + if err != nil { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.OverflowFloat(f) { + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + if target.CanSet() { + target.SetFloat(f) + } + + case reflect.String: + value := data + if value == "" { + value = defVal.String() + } + // validate string + if target.CanSet() { + target.SetString(value) + } + + case reflect.Ptr: + if data == "" && defVal.Kind() == reflect.Ptr { + if target.CanSet() { + target.Set(defVal) + } + return nil + } + newVal := reflect.New(target.Type().Elem()) + if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { + return err + } + if target.CanSet() { + target.Set(newVal) + } + + default: + return errors.InvalidType(p.Name, p.parameter.In, tpe, data) + } + return nil +} + +func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) { + if !target.CanSet() { + return false, nil + } + // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more + if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) { + if defaultValue != nil && len(data) == 0 { + target.Set(reflect.ValueOf(defaultValue)) + return true, nil + } + value := reflect.New(target.Type()) + if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { + return true, err + } + target.Set(reflect.Indirect(value)) + return true, nil + } + return false, nil +} + +func (p 
*untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { + ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) + if err != nil { + return nil, true, err + } + if ok { + return nil, true, nil + } + + return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil +} + +func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error { + sz := len(data) + if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { + return errors.Required(p.Name, p.parameter.In) + } + + defVal := reflect.Zero(target.Type()) + if defaultValue != nil { + defVal = reflect.ValueOf(defaultValue) + } + + if !target.CanSet() { + return nil + } + if sz == 0 { + target.Set(defVal) + return nil + } + + value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) + + for i := 0; i < sz; i++ { + if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { + return err + } + } + + target.Set(value) + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go new file mode 100644 index 000000000000..03385251e195 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go @@ -0,0 +1,9 @@ +// +build !go1.8 + +package middleware + +import "net/url" + +func pathUnescape(path string) (string, error) { + return url.QueryUnescape(path) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go new file mode 100644 index 000000000000..21277948c0c4 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/redoc.go @@ -0,0 +1,101 @@ +package middleware + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + "path" +) + +// RedocOpts configures the Redoc middlewares +type RedocOpts struct { + // BasePath for the UI path, defaults to: / + BasePath string + // Path combines with BasePath for the full UI path, defaults to: docs + Path string + // SpecURL the url to find the spec for + SpecURL string + // RedocURL for the js that generates the redoc site, defaults to: https://rebilly.github.io/ReDoc/releases/latest/redoc.min.js + RedocURL string + // Title for the documentation site, default to: API documentation + Title string +} + +// EnsureDefaults in case some options are missing +func (r *RedocOpts) EnsureDefaults() { + if r.BasePath == "" { + r.BasePath = "/" + } + if r.Path == "" { + r.Path = "docs" + } + if r.SpecURL == "" { + r.SpecURL = "/swagger.json" + } + if r.RedocURL == "" { + r.RedocURL = redocLatest + } + if r.Title == "" { + r.Title = "API documentation" + } +} + +// Redoc creates a middleware to serve a documentation site for a swagger spec. +// This allows for altering the spec before starting the http listener. 
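+// A typical chain might look like this (editor's illustrative sketch;
+// apiHandler stands for the next handler in the chain):
+//
+//	opts := middleware.RedocOpts{SpecURL: "/swagger.json"}
+//	docs := middleware.Redoc(opts, apiHandler) // serves the docs UI at /docs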
+//
+func Redoc(opts RedocOpts, next http.Handler) http.Handler {
+	opts.EnsureDefaults()
+
+	pth := path.Join(opts.BasePath, opts.Path)
+	tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
+
+	buf := bytes.NewBuffer(nil)
+	_ = tmpl.Execute(buf, opts)
+	b := buf.Bytes()
+
+	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == pth {
+			rw.Header().Set("Content-Type", "text/html; charset=utf-8")
+			rw.WriteHeader(http.StatusOK)
+
+			_, _ = rw.Write(b)
+			return
+		}
+
+		if next == nil {
+			rw.Header().Set("Content-Type", "text/plain")
+			rw.WriteHeader(http.StatusNotFound)
+			_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
+			return
+		}
+		next.ServeHTTP(rw, r)
+	})
+}
+
+const (
+	redocLatest   = "https://rebilly.github.io/ReDoc/releases/latest/redoc.min.js"
+	redocTemplate = `<!DOCTYPE html>
+<html>
+  <head>
+    <title>{{ .Title }}</title>
+    <!-- needed for adaptive design -->
+    <meta charset="utf-8"/>
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+
+    <!--
+    ReDoc doesn't change outer page styles
+    -->
+    <style>
+      body {
+        margin: 0;
+        padding: 0;
+      }
+    </style>
+  </head>
+  <body>
+    <redoc spec-url='{{ .SpecURL }}'></redoc>
+    <script src="{{ .RedocURL }}"> </script>
+  </body>
+</html>
+`
+)
diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go
new file mode 100644
index 000000000000..ee725f587a3c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/request.go
@@ -0,0 +1,104 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+	"net/http"
+	"reflect"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+)
+
+// RequestBinder binds and validates the data from an http request
+type untypedRequestBinder struct {
+	Spec         *spec.Swagger
+	Parameters   map[string]spec.Parameter
+	Formats      strfmt.Registry
+	paramBinders map[string]*untypedParamBinder
+}
+
+// NewRequestBinder creates a new binder for reading a request.
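+// An illustrative use for an untyped API (editor's sketch; params, swspec,
+// req and routeParams are assumed to be in scope):
+//
+//	binder := newUntypedRequestBinder(params, swspec, strfmt.Default)
+//	data := make(map[string]interface{})
+//	err := binder.Bind(req, routeParams, runtime.JSONConsumer(), data)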
+func newUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedRequestBinder {
+	binders := make(map[string]*untypedParamBinder)
+	for fieldName, param := range parameters {
+		binders[fieldName] = newUntypedParamBinder(param, spec, formats)
+	}
+	return &untypedRequestBinder{
+		Parameters:   parameters,
+		paramBinders: binders,
+		Spec:         spec,
+		Formats:      formats,
+	}
+}
+
+// Bind performs the data binding and validation
+func (o *untypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error {
+	val := reflect.Indirect(reflect.ValueOf(data))
+	isMap := val.Kind() == reflect.Map
+	var result []error
+	debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
+	for fieldName, param := range o.Parameters {
+		binder := o.paramBinders[fieldName]
+		debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
+		var target reflect.Value
+		if !isMap {
+			binder.Name = fieldName
+			target = val.FieldByName(fieldName)
+		}
+
+		if isMap {
+			tpe := binder.Type()
+			if tpe == nil {
+				if param.Schema.Type.Contains("array") {
+					tpe = reflect.TypeOf([]interface{}{})
+				} else {
+					tpe = reflect.TypeOf(map[string]interface{}{})
+				}
+			}
+			target = reflect.Indirect(reflect.New(tpe))
+		}
+
+		if !target.IsValid() {
+			result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name))
+			continue
+		}
+
+		if err := binder.Bind(request, routeParams, consumer, target); err != nil {
+			result = append(result, err)
+			continue
+		}
+
+		if binder.validator != nil {
+			rr := binder.validator.Validate(target.Interface())
+			if rr != nil && rr.HasErrors() {
+				result = append(result, rr.AsError())
+			}
+		}
+
+		if isMap {
+			val.SetMapIndex(reflect.ValueOf(param.Name), target)
+		}
+	}
+
+	if len(result) > 0 {
+		return errors.CompositeValidationError(result...)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go
new file mode 100644
index 000000000000..539d8471a83c
--- /dev/null
+++ b/vendor/github.com/go-openapi/runtime/middleware/router.go
@@ -0,0 +1,477 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middleware
+
+import (
+	"fmt"
+	"net/http"
+	fpath "path"
+	"regexp"
+	"strings"
+
+	"github.com/go-openapi/runtime/security"
+
+	"github.com/go-openapi/analysis"
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/loads"
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/runtime/middleware/denco"
+	"github.com/go-openapi/spec"
+	"github.com/go-openapi/strfmt"
+)
+
+// RouteParam is an object to capture route params in a framework agnostic way.
+// Implementations of the muxer should use these route params to communicate with the
+// swagger framework.
+type RouteParam struct {
+	Name  string
+	Value string
+}
+
+// RouteParams the collection of route params
+type RouteParams []RouteParam
+
+// Get gets the value for the route param for the specified key
+func (r RouteParams) Get(name string) string {
+	vv, _, _ := r.GetOK(name)
+	if len(vv) > 0 {
+		return vv[len(vv)-1]
+	}
+	return ""
+}
+
+// GetOK gets the value but also returns booleans to indicate if a key or value
+// is present. This aids in validation and satisfies an interface in use there
+//
+// The returned values are: data, has key, has value
+func (r RouteParams) GetOK(name string) ([]string, bool, bool) {
+	for _, p := range r {
+		if p.Name == name {
+			return []string{p.Value}, true, p.Value != ""
+		}
+	}
+	return nil, false, false
+}
+
+// NewRouter creates a new context aware router middleware
+func NewRouter(ctx *Context, next http.Handler) http.Handler {
+	if ctx.router == nil {
+		ctx.router = DefaultRouter(ctx.spec, ctx.api)
+	}
+
+	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+		if _, rCtx, ok := ctx.RouteInfo(r); ok {
+			next.ServeHTTP(rw, rCtx)
+			return
+		}
+
+		// Not found, check if it exists in the other methods first
+		if others := ctx.AllowedMethods(r); len(others) > 0 {
+			ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
+			return
+		}
+
+		ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath()))
+	})
+}
+
+// RoutableAPI represents an interface for things that can serve
+// as a provider of implementations for the swagger router
+type RoutableAPI interface {
+	HandlerFor(string, string) (http.Handler, bool)
+	ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error)
+	ConsumersFor([]string) map[string]runtime.Consumer
+	ProducersFor([]string) map[string]runtime.Producer
+	AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator
+	Authorizer() runtime.Authorizer
+	Formats() strfmt.Registry
+	DefaultProduces() string
+	DefaultConsumes() string
+}
+
+// Router represents a swagger aware router
+type Router interface {
+	Lookup(method, path string) (*MatchedRoute, bool)
+	OtherMethods(method, path string) []string
+}
+
+type defaultRouteBuilder struct {
+	spec     *loads.Document
+	analyzer *analysis.Spec
+	api      RoutableAPI
+	records  map[string][]denco.Record
+}
+
+type defaultRouter struct {
+	spec    *loads.Document
+	routers map[string]*denco.Router
+}
+
+func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder {
+	return &defaultRouteBuilder{
+		spec:     spec,
+		analyzer: analysis.New(spec.Spec()),
+		api:      api,
+		records:  make(map[string][]denco.Record),
+	}
+}
+
+// DefaultRouter creates a default implementation of the router
+func DefaultRouter(spec *loads.Document, api RoutableAPI) Router {
+	builder := newDefaultRouteBuilder(spec, api)
+	if spec != nil {
+		for method, paths := range builder.analyzer.Operations() {
+			for path, operation := range paths {
+				fp := fpath.Join(spec.BasePath(), path)
+				debugLog("adding route %s %s %q", method, fp, operation.ID)
+				builder.AddRoute(method, fp, operation)
+			}
+		}
+	}
+	return builder.Build()
+}
+
+// RouteAuthenticator is an authenticator that can compose several authenticators together.
+// It also knows when it contains an authenticator that allows for anonymous pass through.
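+// An anonymous pass through is represented by a security requirement whose
+// single scheme name is empty (see buildAuthenticators below).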
+// Contains a group of 1 or more authenticators that have a logical AND relationship
+type RouteAuthenticator struct {
+	Authenticator  map[string]runtime.Authenticator
+	Schemes        []string
+	Scopes         map[string][]string
+	allScopes      []string
+	commonScopes   []string
+	allowAnonymous bool
+}
+
+func (ra *RouteAuthenticator) AllowsAnonymous() bool {
+	return ra.allowAnonymous
+}
+
+// AllScopes returns a list of unique scopes that is the combination
+// of all the scopes in the requirements
+func (ra *RouteAuthenticator) AllScopes() []string {
+	return ra.allScopes
+}
+
+// CommonScopes returns a list of unique scopes that are common in all the
+// scopes in the requirements
+func (ra *RouteAuthenticator) CommonScopes() []string {
+	return ra.commonScopes
+}
+
+// Authenticate Authenticator interface implementation
+func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) {
+	if ra.allowAnonymous {
+		route.Authenticator = ra
+		return true, nil, nil
+	}
+	// iterate in proper order
+	var lastResult interface{}
+	for _, scheme := range ra.Schemes {
+		if authenticator, ok := ra.Authenticator[scheme]; ok {
+			applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{
+				Request:        req,
+				RequiredScopes: ra.Scopes[scheme],
+			})
+			if !applies {
+				return false, nil, nil
+			}
+			if err != nil {
+				route.Authenticator = ra
+				return true, nil, err
+			}
+			lastResult = princ
+		}
+	}
+	route.Authenticator = ra
+	return true, lastResult, nil
+}
+
+func stringSliceUnion(slices ...[]string) []string {
+	unique := make(map[string]struct{})
+	var result []string
+	for _, slice := range slices {
+		for _, entry := range slice {
+			if _, ok := unique[entry]; ok {
+				continue
+			}
+			unique[entry] = struct{}{}
+			result = append(result, entry)
+		}
+	}
+	return result
+}
+
+func stringSliceIntersection(slices ...[]string) []string {
+	unique := make(map[string]int)
+	var intersection []string
+
+	total := len(slices)
+	var emptyCnt int
+	for _, slice := range slices {
+		if len(slice) == 0 {
+			emptyCnt++
+			continue
+		}
+
+		for _, entry := range slice {
+			unique[entry]++
+			if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices
+				intersection = append(intersection, entry)
+			}
+		}
+	}
+
+	return intersection
+}
+
+// RouteAuthenticators represents a group of authenticators that represent a logical OR
+type RouteAuthenticators []RouteAuthenticator
+
+// AllowsAnonymous returns true when there is an authenticator that means optional auth
+func (ras RouteAuthenticators) AllowsAnonymous() bool {
+	for _, ra := range ras {
+		if ra.AllowsAnonymous() {
+			return true
+		}
+	}
+	return false
+}
+
+// Authenticate method implementation so this collection can be used as an authenticator
+func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) {
+	var lastError error
+	var allowsAnon bool
+	var anonAuth RouteAuthenticator
+
+	for _, ra := range ras {
+		if ra.AllowsAnonymous() {
+			anonAuth = ra
+			allowsAnon = true
+			continue
+		}
+		applies, usr, err := ra.Authenticate(req, route)
+		if !applies || err != nil || usr == nil {
+			if err != nil {
+				lastError = err
+			}
+			continue
+		}
+		return applies, usr, nil
+	}
+
+	if allowsAnon && lastError == nil {
+		route.Authenticator = &anonAuth
+		return true, nil, lastError
+	}
+	return lastError != nil, nil, lastError
+}
+
+type routeEntry struct {
+	PathPattern    string
+	BasePath       string
+	Operation      *spec.Operation
+	Consumes       []string
+	Consumers
map[string]runtime.Consumer + Produces []string + Producers map[string]runtime.Producer + Parameters map[string]spec.Parameter + Handler http.Handler + Formats strfmt.Registry + Binder *untypedRequestBinder + Authenticators RouteAuthenticators + Authorizer runtime.Authorizer +} + +// MatchedRoute represents the route that was matched in this request +type MatchedRoute struct { + routeEntry + Params RouteParams + Consumer runtime.Consumer + Producer runtime.Producer + Authenticator *RouteAuthenticator +} + +// HasAuth returns true when the route has a security requirement defined +func (m *MatchedRoute) HasAuth() bool { + return len(m.Authenticators) > 0 +} + +// NeedsAuth returns true when the request still +// needs to perform authentication +func (m *MatchedRoute) NeedsAuth() bool { + return m.HasAuth() && m.Authenticator == nil +} + +func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { + mth := strings.ToUpper(method) + debugLog("looking up route for %s %s", method, path) + if Debug { + if len(d.routers) == 0 { + debugLog("there are no known routers") + } + for meth := range d.routers { + debugLog("got a router for %s", meth) + } + } + if router, ok := d.routers[mth]; ok { + if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { + if entry, ok := m.(*routeEntry); ok { + debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) + var params RouteParams + for _, p := range rp { + v, err := pathUnescape(p.Value) + if err != nil { + debugLog("failed to escape %q: %v", p.Value, err) + v = p.Value + } + // a workaround to handle fragment/composing parameters until they are supported in denco router + // check if this parameter is a fragment within a path segment + if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { + // extract fragment parameters + ep := strings.Split(entry.PathPattern[xpos:], "/")[0] + pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) + for i, pname := range pnames { + params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) + } + } else { + // use the parameter directly + params = append(params, RouteParam{Name: p.Name, Value: v}) + } + } + return &MatchedRoute{routeEntry: *entry, Params: params}, true + } + } else { + debugLog("couldn't find a route by path for %s %s", method, path) + } + } else { + debugLog("couldn't find a route by method for %s %s", method, path) + } + return nil, false +} + +func (d *defaultRouter) OtherMethods(method, path string) []string { + mn := strings.ToUpper(method) + var methods []string + for k, v := range d.routers { + if k != mn { + if _, _, ok := v.Lookup(fpath.Clean(path)); ok { + methods = append(methods, k) + continue + } + } + } + return methods +} + +// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco +var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) + +func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { + pleft := strings.Index(pattern, "{") + names = append(names, name) + if pleft < 0 { + if strings.HasSuffix(value, pattern) { + values = append(values, value[:len(value)-len(pattern)]) + } else { + values = append(values, "") + } + } else { + toskip := pattern[:pleft] + pright := strings.Index(pattern, "}") + vright := strings.Index(value, toskip) + if vright >= 0 { + values = 
append(values, value[:vright]) + } else { + values = append(values, "") + value = "" + } + return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) + } + return names, values +} + +func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { + mn := strings.ToUpper(method) + + bp := fpath.Clean(d.spec.BasePath()) + if len(bp) > 0 && bp[len(bp)-1] == '/' { + bp = bp[:len(bp)-1] + } + + debugLog("operation: %#v", *operation) + if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { + consumes := d.analyzer.ConsumesFor(operation) + produces := d.analyzer.ProducesFor(operation) + parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) + + record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ + BasePath: bp, + PathPattern: path, + Operation: operation, + Handler: handler, + Consumes: consumes, + Produces: produces, + Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), + Producers: d.api.ProducersFor(normalizeOffers(produces)), + Parameters: parameters, + Formats: d.api.Formats(), + Binder: newUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), + Authenticators: d.buildAuthenticators(operation), + Authorizer: d.api.Authorizer(), + }) + d.records[mn] = append(d.records[mn], record) + } +} + +func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { + requirements := d.analyzer.SecurityRequirementsFor(operation) + var auths []RouteAuthenticator + for _, reqs := range requirements { + var schemes []string + scopes := make(map[string][]string, len(reqs)) + var scopeSlices [][]string + for _, req := range reqs { + schemes = append(schemes, req.Name) + scopes[req.Name] = req.Scopes + scopeSlices = append(scopeSlices, req.Scopes) + } + + definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) + authenticators := d.api.AuthenticatorsFor(definitions) + auths = append(auths, RouteAuthenticator{ + Authenticator: authenticators, + Schemes: schemes, + Scopes: scopes, + allScopes: stringSliceUnion(scopeSlices...), + commonScopes: stringSliceIntersection(scopeSlices...), + allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", + }) + } + return auths +} + +func (d *defaultRouteBuilder) Build() *defaultRouter { + routers := make(map[string]*denco.Router) + for method, records := range d.records { + router := denco.New() + _ = router.Build(records) + routers[method] = router + } + return &defaultRouter{ + spec: d.spec, + routers: routers, + } +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go new file mode 100644 index 000000000000..2b061caefcb3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/security.go @@ -0,0 +1,39 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package middleware + +import "net/http" + +func newSecureAPI(ctx *Context, next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := ctx.RouteInfo(r) + if rCtx != nil { + r = rCtx + } + if route != nil && !route.NeedsAuth() { + next.ServeHTTP(rw, r) + return + } + + _, rCtx, err := ctx.Authorize(r, route) + if err != nil { + ctx.Respond(rw, r, route.Produces, route, err) + return + } + r = rCtx + + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go new file mode 100644 index 000000000000..f02914298060 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/spec.go @@ -0,0 +1,48 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package middleware + +import ( + "net/http" + "path" +) + +// Spec creates a middleware to serve a swagger spec. +// This allows for altering the spec before starting the http listener. +// This can be useful if you want to serve the swagger spec from another path than /swagger.json +// +func Spec(basePath string, b []byte, next http.Handler) http.Handler { + if basePath == "" { + basePath = "/" + } + pth := path.Join(basePath, "swagger.json") + + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + if r.URL.Path == pth { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + //#nosec + _, _ = rw.Write(b) + return + } + + if next == nil { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusNotFound) + return + } + next.ServeHTTP(rw, r) + }) +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go new file mode 100644 index 000000000000..8226b1ed1c6b --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go @@ -0,0 +1,286 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
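`middleware.Spec` above is deliberately small: it pins the serialized document at `path.Join(basePath, "swagger.json")` and hands every other request to `next`. A hedged usage sketch (the inline spec bytes, port, and downstream handler are stand-ins):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// Stand-in for a real marshalled swagger document.
	specDoc := []byte(`{"swagger":"2.0","info":{"title":"demo","version":"1.0"}}`)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "api handler")
	})

	// GET /v1/swagger.json returns specDoc; everything else reaches api.
	log.Fatal(http.ListenAndServe(":8080", middleware.Spec("/v1", specDoc, api)))
}
```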
+ +package untyped + +import ( + "fmt" + "net/http" + "sort" + "strings" + + "github.com/go-openapi/analysis" + "github.com/go-openapi/errors" + "github.com/go-openapi/loads" + "github.com/go-openapi/runtime" + "github.com/go-openapi/spec" + "github.com/go-openapi/strfmt" +) + +// NewAPI creates the default untyped API +func NewAPI(spec *loads.Document) *API { + var an *analysis.Spec + if spec != nil && spec.Spec() != nil { + an = analysis.New(spec.Spec()) + } + api := &API{ + spec: spec, + analyzer: an, + consumers: make(map[string]runtime.Consumer, 10), + producers: make(map[string]runtime.Producer, 10), + authenticators: make(map[string]runtime.Authenticator), + operations: make(map[string]map[string]runtime.OperationHandler), + ServeError: errors.ServeError, + Models: make(map[string]func() interface{}), + formats: strfmt.NewFormats(), + } + return api.WithJSONDefaults() +} + +// API represents an untyped mux for a swagger spec +type API struct { + spec *loads.Document + analyzer *analysis.Spec + DefaultProduces string + DefaultConsumes string + consumers map[string]runtime.Consumer + producers map[string]runtime.Producer + authenticators map[string]runtime.Authenticator + authorizer runtime.Authorizer + operations map[string]map[string]runtime.OperationHandler + ServeError func(http.ResponseWriter, *http.Request, error) + Models map[string]func() interface{} + formats strfmt.Registry +} + +// WithJSONDefaults loads the json defaults for this api +func (d *API) WithJSONDefaults() *API { + d.DefaultConsumes = runtime.JSONMime + d.DefaultProduces = runtime.JSONMime + d.consumers[runtime.JSONMime] = runtime.JSONConsumer() + d.producers[runtime.JSONMime] = runtime.JSONProducer() + return d +} + +// WithoutJSONDefaults clears the json defaults for this api +func (d *API) WithoutJSONDefaults() *API { + d.DefaultConsumes = "" + d.DefaultProduces = "" + delete(d.consumers, runtime.JSONMime) + delete(d.producers, runtime.JSONMime) + return d +} + +// Formats returns the registered string formats +func (d *API) Formats() strfmt.Registry { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + return d.formats +} + +// RegisterFormat registers a custom format validator +func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { + if d.formats == nil { + d.formats = strfmt.NewFormats() + } + d.formats.Add(name, format, validator) +} + +// RegisterAuth registers an auth handler in this api +func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { + if d.authenticators == nil { + d.authenticators = make(map[string]runtime.Authenticator) + } + d.authenticators[scheme] = handler +} + +// RegisterAuthorizer registers an authorizer handler in this api +func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { + d.authorizer = handler +} + +// RegisterConsumer registers a consumer for a media type. 
+func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { + if d.consumers == nil { + d.consumers = make(map[string]runtime.Consumer, 10) + } + d.consumers[strings.ToLower(mediaType)] = handler +} + +// RegisterProducer registers a producer for a media type +func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { + if d.producers == nil { + d.producers = make(map[string]runtime.Producer, 10) + } + d.producers[strings.ToLower(mediaType)] = handler +} + +// RegisterOperation registers an operation handler for an operation name +func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { + if d.operations == nil { + d.operations = make(map[string]map[string]runtime.OperationHandler, 30) + } + um := strings.ToUpper(method) + if b, ok := d.operations[um]; !ok || b == nil { + d.operations[um] = make(map[string]runtime.OperationHandler) + } + d.operations[um][path] = handler +} + +// OperationHandlerFor returns the operation handler for the specified id if it can be found +func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { + if d.operations == nil { + return nil, false + } + if pi, ok := d.operations[strings.ToUpper(method)]; ok { + h, ok := pi[path] + return h, ok + } + return nil, false +} + +// ConsumersFor gets the consumers for the specified media types +func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { + result := make(map[string]runtime.Consumer) + for _, mt := range mediaTypes { + if consumer, ok := d.consumers[mt]; ok { + result[mt] = consumer + } + } + return result +} + +// ProducersFor gets the producers for the specified media types +func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { + result := make(map[string]runtime.Producer) + for _, mt := range mediaTypes { + if producer, ok := d.producers[mt]; ok { + result[mt] = producer + } + } + return result +} + +// AuthenticatorsFor gets the authenticators for the specified security schemes +func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { + result := make(map[string]runtime.Authenticator) + for k := range schemes { + if a, ok := d.authenticators[k]; ok { + result[k] = a + } + } + return result +} + +// Authorizer returns the registered authorizer +func (d *API) Authorizer() runtime.Authorizer { + return d.authorizer +} + +// Validate validates this API for any missing items +func (d *API) Validate() error { + return d.validate() +} + +// validateWith validates the registrations in this API against the provided spec analyzer +func (d *API) validate() error { + var consumes []string + for k := range d.consumers { + consumes = append(consumes, k) + } + + var produces []string + for k := range d.producers { + produces = append(produces, k) + } + + var authenticators []string + for k := range d.authenticators { + authenticators = append(authenticators, k) + } + + var operations []string + for m, v := range d.operations { + for p := range v { + operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) + } + } + + var definedAuths []string + for k := range d.spec.Spec().SecurityDefinitions { + definedAuths = append(definedAuths, k) + } + + if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { + return err + } + if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { + return err + } + if err := d.verify("operation", operations, 
d.analyzer.OperationMethodPaths()); err != nil { + return err + } + + requiredAuths := d.analyzer.RequiredSecuritySchemes() + if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { + return err + } + if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { + return err + } + return nil +} + +func (d *API) verify(name string, registrations []string, expectations []string) error { + + sort.Strings(registrations) + sort.Strings(expectations) + + expected := map[string]struct{}{} + seen := map[string]struct{}{} + + for _, v := range expectations { + expected[v] = struct{}{} + } + + var unspecified []string + for _, v := range registrations { + seen[v] = struct{}{} + if _, ok := expected[v]; !ok { + unspecified = append(unspecified, v) + } + } + + for k := range seen { + delete(expected, k) + } + + var unregistered []string + for k := range expected { + unregistered = append(unregistered, k) + } + sort.Strings(unspecified) + sort.Strings(unregistered) + + if len(unregistered) > 0 || len(unspecified) > 0 { + return &errors.APIVerificationFailed{ + Section: name, + MissingSpecification: unspecified, + MissingRegistration: unregistered, + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go new file mode 100644 index 000000000000..bb8df3cb3def --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/middleware/validation.go @@ -0,0 +1,122 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
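Putting the untyped API together: `NewAPI` starts with JSON defaults and grows by explicit `Register*` calls, and `Validate` then cross-checks the registrations against the spec in both directions. A sketch assuming the document is loaded with `loads.Analyzed` (the inline spec is a stub, and it declares XML so the extra consumer below is actually expected):

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	raw := json.RawMessage(`{
		"swagger": "2.0",
		"info": {"title": "demo", "version": "1.0"},
		"consumes": ["application/json", "application/xml"],
		"produces": ["application/json"],
		"paths": {}
	}`)
	doc, err := loads.Analyzed(raw, "")
	if err != nil {
		log.Fatal(err)
	}

	api := untyped.NewAPI(doc) // JSON consumer/producer come pre-registered
	api.RegisterConsumer("application/xml", runtime.XMLConsumer())

	// Fails if a registration has no spec counterpart, or vice versa.
	if err := api.Validate(); err != nil {
		log.Fatal(err)
	}
	log.Println("untyped API ready")
}
```

The `verify` helper doing that cross-check is a plain two-way set difference: registrations absent from the spec surface as `MissingSpecification`, spec requirements nobody registered surface as `MissingRegistration`. The same logic in isolation (`diffSets` is our name, not the library's):

```go
package main

import (
	"fmt"
	"sort"
)

// diffSets mirrors verify above: compare what was registered against what
// the spec expects, and report the leftovers on each side.
func diffSets(registrations, expectations []string) (unspecified, unregistered []string) {
	expected := map[string]struct{}{}
	for _, v := range expectations {
		expected[v] = struct{}{}
	}
	seen := map[string]struct{}{}
	for _, v := range registrations {
		seen[v] = struct{}{}
		if _, ok := expected[v]; !ok {
			unspecified = append(unspecified, v)
		}
	}
	for k := range seen {
		delete(expected, k)
	}
	for k := range expected {
		unregistered = append(unregistered, k)
	}
	sort.Strings(unspecified)
	sort.Strings(unregistered)
	return
}

func main() {
	unspec, unreg := diffSets(
		[]string{"application/json", "application/xml"}, // registered
		[]string{"application/json", "text/csv"},        // required by the spec
	)
	fmt.Println(unspec, unreg) // [application/xml] [text/csv]
}
```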
+ +package middleware + +import ( + "mime" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/swag" +) + +type validation struct { + context *Context + result []error + request *http.Request + route *MatchedRoute + bound map[string]interface{} +} + +// ContentType validates the content type of a request +func validateContentType(allowed []string, actual string) error { + debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) + if len(allowed) == 0 { + return nil + } + mt, _, err := mime.ParseMediaType(actual) + if err != nil { + return errors.InvalidContentType(actual, allowed) + } + if swag.ContainsStringsCI(allowed, mt) { + return nil + } + if swag.ContainsStringsCI(allowed, "*/*") { + return nil + } + parts := strings.Split(actual, "/") + if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") { + return nil + } + return errors.InvalidContentType(actual, allowed) +} + +func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { + debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) + validate := &validation{ + context: ctx, + request: request, + route: route, + bound: make(map[string]interface{}), + } + + validate.contentType() + if len(validate.result) == 0 { + validate.responseFormat() + } + if len(validate.result) == 0 { + validate.parameters() + } + + return validate +} + +func (v *validation) parameters() { + debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) + if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { + if result.Error() == "validation failure list" { + for _, e := range result.(*errors.Validation).Value.([]interface{}) { + v.result = append(v.result, e.(error)) + } + return + } + v.result = append(v.result, result) + } +} + +func (v *validation) contentType() { + if len(v.result) == 0 && runtime.HasBody(v.request) { + debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) + ct, _, req, err := v.context.ContentType(v.request) + if err != nil { + v.result = append(v.result, err) + } else { + v.request = req + } + + if len(v.result) == 0 { + if err := validateContentType(v.route.Consumes, ct); err != nil { + v.result = append(v.result, err) + } + } + if ct != "" && v.route.Consumer == nil { + cons, ok := v.route.Consumers[ct] + if !ok { + v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct)) + } else { + v.route.Consumer = cons + } + } + } +} + +func (v *validation) responseFormat() { + if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && runtime.HasBody(v.request) { + v.request = rCtx + v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) + } +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go new file mode 100644 index 000000000000..9e51b42b59d0 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -0,0 +1,139 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
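`validateContentType` above accepts three shapes of match once the header has been normalized through `mime.ParseMediaType`: an exact case-insensitive media type, the global `*/*` wildcard, or a type-level wildcard such as `text/*`. A condensed restatement of those rules (`allowedMatch` is our helper name, not the library's):

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// allowedMatch condenses the checks in validateContentType: exact match,
// the */* wildcard, or a type-level wildcard like text/*.
func allowedMatch(allowed []string, actual string) bool {
	if len(allowed) == 0 {
		return true // nothing declared means nothing to enforce
	}
	mt, _, err := mime.ParseMediaType(actual)
	if err != nil {
		return false
	}
	for _, a := range allowed {
		if strings.EqualFold(a, mt) || a == "*/*" {
			return true
		}
		if parts := strings.SplitN(mt, "/", 2); len(parts) == 2 && strings.EqualFold(a, parts[0]+"/*") {
			return true
		}
	}
	return false
}

func main() {
	allowed := []string{"application/json", "text/*"}
	for _, ct := range []string{"application/json; charset=utf-8", "text/csv", "image/png"} {
		fmt.Printf("%s -> %v\n", ct, allowedMatch(allowed, ct))
	}
	// application/json; charset=utf-8 -> true
	// text/csv -> true
	// image/png -> false
}
```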
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bufio" + "io" + "net/http" + "strings" + + "github.com/go-openapi/swag" +) + +// CanHaveBody returns true if this method can have a body +func CanHaveBody(method string) bool { + mn := strings.ToUpper(method) + return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" +} + +// IsSafe returns true if this is a request with a safe method +func IsSafe(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn == "GET" || mn == "HEAD" +} + +// AllowsBody returns true if the request allows for a body +func AllowsBody(r *http.Request) bool { + mn := strings.ToUpper(r.Method) + return mn != "HEAD" +} + +// HasBody returns true if this method needs a content-type +func HasBody(r *http.Request) bool { + // happy case: we have a content length set + if r.ContentLength > 0 { + return true + } + + if r.Header.Get(http.CanonicalHeaderKey("content-length")) != "" { + // in this case, no Transfer-Encoding should be present + // we have a header set but it was explicitly set to 0, so we assume no body + return false + } + + rdr := newPeekingReader(r.Body) + r.Body = rdr + return rdr.HasContent() +} + +func newPeekingReader(r io.ReadCloser) *peekingReader { + if r == nil { + return nil + } + return &peekingReader{ + underlying: bufio.NewReader(r), + orig: r, + } +} + +type peekingReader struct { + underlying interface { + Buffered() int + Peek(int) ([]byte, error) + Read([]byte) (int, error) + } + orig io.ReadCloser +} + +func (p *peekingReader) HasContent() bool { + if p == nil { + return false + } + if p.underlying.Buffered() > 0 { + return true + } + b, err := p.underlying.Peek(1) + if err != nil { + return false + } + return len(b) > 0 +} + +func (p *peekingReader) Read(d []byte) (int, error) { + if p == nil { + return 0, io.EOF + } + return p.underlying.Read(d) +} + +func (p *peekingReader) Close() error { + p.underlying = nil + if p.orig != nil { + return p.orig.Close() + } + return nil +} + +// JSONRequest creates a new http request with json headers set +func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + req.Header.Add(HeaderContentType, JSONMime) + req.Header.Add(HeaderAccept, JSONMime) + return req, nil +} + +// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) +type Gettable interface { + GetOK(string) ([]string, bool, bool) +} + +// ReadSingleValue reads a single value from the source +func ReadSingleValue(values Gettable, name string) string { + vv, _, hv := values.GetOK(name) + if hv { + return vv[len(vv)-1] + } + return "" +} + +// ReadCollectionValue reads a collection value from a string data source +func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { + v := ReadSingleValue(values, name) + return swag.SplitByFormat(v, collectionFormat) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go new file mode 100644 index 
000000000000..5d058b8d1fb2 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authenticator.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "context" + "net/http" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" +) + +const ( + query = "query" + header = "header" +) + +// HttpAuthenticator is a function that authenticates a HTTP request +func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*http.Request); ok { + return handler(request) + } + if scoped, ok := params.(*ScopedAuthRequest); ok { + return handler(scoped.Request) + } + return false, nil, nil + }) +} + +// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes +func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { + return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { + if request, ok := params.(*ScopedAuthRequest); ok { + return handler(request) + } + return false, nil, nil + }) +} + +// UserPassAuthentication authentication function +type UserPassAuthentication func(string, string) (interface{}, error) + +// UserPassAuthenticationCtx authentication function with context.Context +type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) + +// TokenAuthentication authentication function +type TokenAuthentication func(string) (interface{}, error) + +// TokenAuthenticationCtx authentication function with context.Context +type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) + +// ScopedTokenAuthentication authentication function +type ScopedTokenAuthentication func(string, []string) (interface{}, error) + +// ScopedTokenAuthenticationCtx authentication function with context.Context +type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) + +var DefaultRealmName = "API" + +type secCtxKey uint8 + +const ( + failedBasicAuth secCtxKey = iota + oauth2SchemeName +) + +func FailedBasicAuth(r *http.Request) string { + return FailedBasicAuthCtx(r.Context()) +} + +func FailedBasicAuthCtx(ctx context.Context) string { + v, ok := ctx.Value(failedBasicAuth).(string) + if !ok { + return "" + } + return v +} + +func OAuth2SchemeName(r *http.Request) string { + return OAuth2SchemeNameCtx(r.Context()) +} + +func OAuth2SchemeNameCtx(ctx context.Context) string { + v, ok := ctx.Value(oauth2SchemeName).(string) + if !ok { + return "" + } + return v +} + +// BasicAuth creates a basic auth authenticator with the provided authentication function +func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { + return BasicAuthRealm(DefaultRealmName, 
authenticate) +} + +// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name +func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + p, err := authenticate(usr, pass) + if err != nil { + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + } + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context +func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { + return BasicAuthRealmCtx(DefaultRealmName, authenticate) +} + +// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context +func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { + if realm == "" { + realm = DefaultRealmName + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + if usr, pass, ok := r.BasicAuth(); ok { + ctx, p, err := authenticate(r.Context(), usr, pass) + if err != nil { + ctx = context.WithValue(ctx, failedBasicAuth, realm) + } + *r = *r.WithContext(ctx) + return true, p, err + } + *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) + return false, nil, nil + }) +} + +// APIKeyAuth creates an authenticator that uses a token for authorization. +// This token can be obtained from either a header or a query string +func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + p, err := authenticate(token) + return true, p, err + }) +} + +// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
+// This token can be obtained from either a header or a query string +func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { + inl := strings.ToLower(in) + if inl != query && inl != header { + // panic because this is most likely a typo + panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) + } + + var getToken func(*http.Request) string + switch inl { + case header: + getToken = func(r *http.Request) string { return r.Header.Get(name) } + case query: + getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } + } + + return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { + token := getToken(r) + if token == "" { + return false, nil, nil + } + + ctx, p, err := authenticate(r.Context(), token) + *r = *r.WithContext(ctx) + return true, p, err + }) +} + +// ScopedAuthRequest contains both a http request and the required scopes for a particular operation +type ScopedAuthRequest struct { + Request *http.Request + RequiredScopes []string +} + +// BearerAuth for use with oauth2 flows +func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get("Authorization") + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + *r.Request = *r.Request.WithContext(rctx) + p, err := authenticate(token, r.RequiredScopes) + return true, p, err + }) +} + +// BearerAuthCtx for use with oauth2 flows with support for context.Context. +func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { + const prefix = "Bearer " + return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { + var token string + hdr := r.Request.Header.Get("Authorization") + if strings.HasPrefix(hdr, prefix) { + token = strings.TrimPrefix(hdr, prefix) + } + if token == "" { + qs := r.Request.URL.Query() + token = qs.Get("access_token") + } + //#nosec + ct, _, _ := runtime.ContentType(r.Request.Header) + if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { + token = r.Request.FormValue("access_token") + } + + if token == "" { + return false, nil, nil + } + + rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) + ctx, p, err := authenticate(rctx, token, r.RequiredScopes) + *r.Request = *r.Request.WithContext(ctx) + return true, p, err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go new file mode 100644 index 000000000000..00c1a4d6a4c3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/security/authorizer.go @@ -0,0 +1,27 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
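All of the authenticators above funnel through `HttpAuthenticator` (or `ScopedAuthenticator`), so wiring one up amounts to supplying the credential check. A hedged sketch using `APIKeyAuth` from this file; the header name, token value, and the `errors.Unauthenticated` scheme label are illustrative:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime/security"
)

func main() {
	// The token check is a stand-in; real code would consult a store or IdP.
	auth := security.APIKeyAuth("X-API-Key", "header", func(token string) (interface{}, error) {
		if token == "s3cr3t" {
			return "service-account", nil
		}
		return nil, errors.Unauthenticated("apiKey")
	})

	req, _ := http.NewRequest("GET", "http://localhost/pets", nil)
	req.Header.Set("X-API-Key", "s3cr3t")

	// Authenticate reports whether the scheme applied, the principal, and any error.
	applies, principal, err := auth.Authenticate(req)
	fmt.Println(applies, principal, err) // true service-account <nil>
}
```

Note the three-valued return: a missing token yields `(false, nil, nil)` rather than an error, so callers can fall through to the next scheme in a security requirement.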
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package security + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// Authorized provides a default implementation of the Authorizer interface where all +// requests are authorized (successful) +func Authorized() runtime.Authorizer { + return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) +} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go new file mode 100644 index 000000000000..3b011a0bff19 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/statuses.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +// Statuses lists the most common HTTP status codes to default message +// taken from https://httpstatuses.com/ +var Statuses = map[int]string{ + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 103: "Checkpoint", + 122: "URI too long", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Request Processed", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Request Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Wrong Exchange Server", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not 
Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go new file mode 100644 index 000000000000..c7fd04c3c5c1 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/text.go @@ -0,0 +1,117 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + + "github.com/go-openapi/swag" +) + +// TextConsumer creates a new text consumer +func TextConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + if reader == nil { + return errors.New("TextConsumer requires a reader") // early exit + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + if err != nil { + return err + } + b := buf.Bytes() + + // If the buffer is empty, no need to unmarshal it, which causes a panic. + if len(b) == 0 { + data = "" + return nil + } + + if tu, ok := data.(encoding.TextUnmarshaler); ok { + err := tu.UnmarshalText(b) + if err != nil { + return fmt.Errorf("text consumer: %v", err) + } + + return nil + } + + t := reflect.TypeOf(data) + if data != nil && t.Kind() == reflect.Ptr { + v := reflect.Indirect(reflect.ValueOf(data)) + if t.Elem().Kind() == reflect.String { + v.SetString(string(b)) + return nil + } + } + + return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", + data, data, "can be resolved by supporting TextUnmarshaler interface") + }) +} + +// TextProducer creates a new text producer +func TextProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + if writer == nil { + return errors.New("TextProducer requires a writer") // early exit + } + + if data == nil { + return errors.New("no data given to produce text from") + } + + if tm, ok := data.(encoding.TextMarshaler); ok { + txt, err := tm.MarshalText() + if err != nil { + return fmt.Errorf("text producer: %v", err) + } + _, err = writer.Write(txt) + return err + } + + if str, ok := data.(error); ok { + _, err := writer.Write([]byte(str.Error())) + return err + } + + if str, ok := data.(fmt.Stringer); ok { + _, err := writer.Write([]byte(str.String())) + return err + } + + v := reflect.Indirect(reflect.ValueOf(data)) + if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + b, err := swag.WriteJSON(data) + if err != nil { + return err + } + _, err = writer.Write(b) + return err + } + if v.Kind() != reflect.String { + return fmt.Errorf("%T is not a supported type by the TextProducer", data) + } + + _, err := writer.Write([]byte(v.String())) + return err + }) +} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go new file mode 100644 index 000000000000..11f5732af4e3 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/values.go @@ -0,0 +1,19 @@ +package runtime + +// 
Values typically represent parameters on a http request. +type Values map[string][]string + +// GetOK returns the values collection for the given key. +// When the key is present in the map it will return true for hasKey. +// When the value is not empty it will return true for hasValue. +func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { + value, hasKey = v[key] + if !hasKey { + return + } + if len(value) == 0 { + return + } + hasValue = true + return +} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go new file mode 100644 index 000000000000..821c7393dfbb --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/xml.go @@ -0,0 +1,36 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package runtime + +import ( + "encoding/xml" + "io" +) + +// XMLConsumer creates a new XML consumer +func XMLConsumer() Consumer { + return ConsumerFunc(func(reader io.Reader, data interface{}) error { + dec := xml.NewDecoder(reader) + return dec.Decode(data) + }) +} + +// XMLProducer creates a new XML producer +func XMLProducer() Producer { + return ProducerFunc(func(writer io.Writer, data interface{}) error { + enc := xml.NewEncoder(writer) + return enc.Encode(data) + }) +} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 000000000000..dd91ed6a04e6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml new file mode 100644 index 000000000000..3e33f9f2e3ec --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -0,0 +1,23 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 2 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals diff --git a/vendor/github.com/go-openapi/spec/.travis.yml 
b/vendor/github.com/go-openapi/spec/.travis.yml new file mode 100644 index 000000000000..aa26d8763aa3 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go +notifications: + slack: + secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 000000000000..6354742cbf62 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,10 @@ +# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents. + +Currently supports Swagger 2.0. diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 000000000000..d5ec7b900a73 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,297 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// schemas/jsonschema-draft-04.json (4.357kB) +// schemas/v2/schema.json (40.249kB) + +package spec + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04JSON, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04JSON() (*asset, error) { + bytes, err := jsonschemaDraft04JSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 
0xf1, 0x1b, 0x82, 0xe2}} + return a, nil +} + +var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b
\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf
4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\x
a0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") + +func 
v2SchemaJSONBytes() ([]byte, error) {
+	return bindataRead(
+		_v2SchemaJSON,
+		"v2/schema.json",
+	)
+}
+
+func v2SchemaJSON() (*asset, error) {
+	bytes, err := v2SchemaJSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}}
+	return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("Asset %s cannot be read: %v", name, err)
+		}
+		return a.bytes, nil
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// AssetString returns the asset contents as a string (instead of a []byte).
+func AssetString(name string) (string, error) {
+	data, err := Asset(name)
+	return string(data), err
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+	a, err := Asset(name)
+	if err != nil {
+		panic("asset: Asset(" + name + "): " + err.Error())
+	}
+
+	return a
+}
+
+// MustAssetString is like AssetString but panics when Asset would return an
+// error. It simplifies safe initialization of global variables.
+func MustAssetString(name string) string {
+	return string(MustAsset(name))
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("AssetInfo %s cannot be read: %v", name, err)
+		}
+		return a.info, nil
+	}
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetDigest returns the digest of the file with the given name. It returns an
+// error if the asset could not be found or the digest could not be loaded.
+func AssetDigest(name string) ([sha256.Size]byte, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s cannot be read: %v", name, err)
+		}
+		return a.digest, nil
+	}
+	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
+}
+
+// Digests returns a map of all known files and their checksums.
+func Digests() (map[string][sha256.Size]byte, error) {
+	mp := make(map[string][sha256.Size]byte, len(_bindata))
+	for name := range _bindata {
+		a, err := _bindata[name]()
+		if err != nil {
+			return nil, err
+		}
+		mp[name] = a.digest
+	}
+	return mp, nil
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
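For reference, the functions above are the entire public surface of this generated file: Asset and MustAsset return the decompressed payload, while AssetInfo, AssetDigest and AssetNames expose the metadata captured at generation time. A minimal sketch of consuming that API (hypothetical usage; it assumes the vendored package is importable as github.com/go-openapi/spec):

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// Asset returns the embedded, decompressed file, or an error for unknown names.
	raw, err := spec.Asset("v2/schema.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("embedded Swagger 2.0 meta-schema: %d bytes\n", len(raw))

	// AssetDigest is the sha256 recorded when the file was embedded; it can
	// be used to verify the payload out of band.
	sum, err := spec.AssetDigest("v2/schema.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sha256: %x\n", sum)

	// AssetNames lists every embedded file (order is not guaranteed).
	fmt.Println(spec.AssetNames())
}

+
+// _bindata is a table, holding each asset generator, mapped to its name.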
+var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04JSON, + + "v2/schema.json": v2SchemaJSON, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, + "v2": &bintree{nil, map[string]*bintree{ + "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go new file mode 100644 index 000000000000..3fada0daef1b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/cache.go @@ -0,0 +1,60 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
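The cache.go file added next backs $ref resolution with a process-wide, read-through cache, pre-seeded with the two embedded meta-schemas. Callers may also bring their own implementation: anything satisfying the two-method ResolutionCache interface can be handed to ExpandSchema, defined further down in expander.go. A sketch under that assumption — spec.RefSchema comes from the package's schema.go, which is outside this diff:

package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/go-openapi/spec"
)

// memCache satisfies spec.ResolutionCache with a mutex-guarded map.
type memCache struct {
	mu    sync.RWMutex
	store map[string]interface{}
}

func (c *memCache) Get(uri string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.store[uri]
	return v, ok
}

func (c *memCache) Set(uri string, data interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.store[uri] = data
}

func main() {
	// A root document whose definitions the $ref below points into.
	root := map[string]interface{}{
		"definitions": map[string]interface{}{
			"name": map[string]interface{}{"type": "string"},
		},
	}
	schema := spec.RefSchema("#/definitions/name")

	// ExpandSchema stores the root document in the supplied cache and
	// resolves the reference from there, rewriting schema in place.
	cache := &memCache{store: map[string]interface{}{}}
	if err := spec.ExpandSchema(schema, root, cache); err != nil {
		log.Fatal(err)
	}
	fmt.Println(schema.Type) // [string]
}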
+
+package spec
+
+import "sync"
+
+// ResolutionCache is a cache for resolving URLs
+type ResolutionCache interface {
+	Get(string) (interface{}, bool)
+	Set(string, interface{})
+}
+
+type simpleCache struct {
+	lock  sync.RWMutex
+	store map[string]interface{}
+}
+
+// Get retrieves the cached value for a URI
+func (s *simpleCache) Get(uri string) (interface{}, bool) {
+	debugLog("getting %q from resolution cache", uri)
+	s.lock.RLock()
+	v, ok := s.store[uri]
+	debugLog("got %q from resolution cache: %t", uri, ok)
+
+	s.lock.RUnlock()
+	return v, ok
+}
+
+// Set caches a value for a URI
+func (s *simpleCache) Set(uri string, data interface{}) {
+	s.lock.Lock()
+	s.store[uri] = data
+	s.lock.Unlock()
+}
+
+var resCache ResolutionCache
+
+func init() {
+	resCache = initResolutionCache()
+}
+
+// initResolutionCache initializes the URI resolution cache
+func initResolutionCache() ResolutionCache {
+	return &simpleCache{store: map[string]interface{}{
+		"http://swagger.io/v2/schema.json":       MustLoadSwagger20Schema(),
+		"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
+	}}
+}
diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go
new file mode 100644
index 000000000000..f285970aa19d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/contact_info.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ContactInfo contact information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#contactObject
+type ContactInfo struct {
+	Name  string `json:"name,omitempty"`
+	URL   string `json:"url,omitempty"`
+	Email string `json:"email,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go
new file mode 100644
index 000000000000..389c528ff613
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/debug.go
@@ -0,0 +1,47 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	// Debug is true when the SWAGGER_DEBUG env var is not empty.
+	// It enables a more verbose logging of this package.
+	Debug = os.Getenv("SWAGGER_DEBUG") != ""
+	// specLogger is a debug logger for this package
+	specLogger *log.Logger
+)
+
+func init() {
+	debugOptions()
+}
+
+func debugOptions() {
+	specLogger = log.New(os.Stdout, "spec:", log.LstdFlags)
+}
+
+func debugLog(msg string, args ...interface{}) {
+	// A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
+	if Debug {
+		_, file1, pos1, _ := runtime.Caller(1)
+		specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+	}
+}
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
new file mode 100644
index 000000000000..1e7fc8c490c6
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -0,0 +1,650 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// ExpandOptions provides options for spec expansion
+type ExpandOptions struct {
+	RelativeBase        string
+	SkipSchemas         bool
+	ContinueOnError     bool
+	AbsoluteCircularRef bool
+}
+
+// ResolveRefWithBase resolves a reference against a context root with preservation of base path
+func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	specBasePath := ""
+	if opts != nil && opts.RelativeBase != "" {
+		specBasePath, _ = absPath(opts.RelativeBase)
+	}
+
+	result := new(Schema)
+	if err := resolver.Resolve(ref, result, specBasePath); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolveRef resolves a reference against a context root
+// ref is guaranteed to be in root (no need to go to external files)
+// ResolveRef is ONLY called from the code generation module
+func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
+	res, _, err := ref.GetPointer().Get(root)
+	if err != nil {
+		panic(err)
+	}
+	switch sch := res.(type) {
+	case Schema:
+		return &sch, nil
+	case *Schema:
+		return sch, nil
+	case map[string]interface{}:
+		b, _ := json.Marshal(sch)
+		newSch := new(Schema)
+		_ = json.Unmarshal(b, newSch)
+		return newSch, nil
+	default:
+		return nil, fmt.Errorf("unknown type for the resolved reference")
+	}
+}
+
+// ResolveParameter resolves a parameter reference against a context root
+func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
+	return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveParameterWithBase resolves a parameter reference against a context root and base path
+func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := new(Parameter)
+	if err := resolver.Resolve(&ref, result, ""); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+	return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := new(Response)
+	if err := resolver.Resolve(&ref, result, ""); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolveItems resolves a parameter's items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $refs are forbidden in response headers.
+func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	basePath := ""
+	if opts.RelativeBase != "" {
+		basePath = opts.RelativeBase
+	}
+	result := new(Items)
+	if err := resolver.Resolve(&ref, result, basePath); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolvePathItem resolves a path item against a context root and base path
+func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	basePath := ""
+	if opts.RelativeBase != "" {
+		basePath = opts.RelativeBase
+	}
+	result := new(PathItem)
+	if err := resolver.Resolve(&ref, result, basePath); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
+	resolver, err := defaultSchemaLoader(spec, options, nil, nil)
+	// Just in case this ever returns an error.
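+	// The expansion below proceeds in four passes over the spec: definitions
+	// (unless SkipSchemas is set), then parameters, responses and, finally,
+	// every path item. Each pass resolves $refs relative to the spec's base
+	// path and routes errors through shouldStopOnError, so the ContinueOnError
+	// option can turn hard failures into best-effort expansion.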
+ if resolver.shouldStopOnError(err) { + return err + } + + // getting the base path of the spec to adjust all subsequent reference resolutions + specBasePath := "" + if options != nil && options.RelativeBase != "" { + specBasePath, _ = absPath(options.RelativeBase) + } + + if options == nil || !options.SkipSchemas { + for key, definition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + if def != nil { + spec.Definitions[key] = *def + } + } + } + + for key := range spec.Parameters { + parameter := spec.Parameters[key] + if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Parameters[key] = parameter + } + + for key := range spec.Responses { + response := spec.Responses[key] + if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key := range spec.Paths.Paths { + path := spec.Paths.Paths[key] + if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) { + return err + } + spec.Paths.Paths[key] = path + } + } + + return nil +} + +// baseForRoot loads in the cache the root document and produces a fake "root" base path entry +// for further $ref resolution +func baseForRoot(root interface{}, cache ResolutionCache) string { + // cache the root document to resolve $ref's + const rootBase = "root" + if root != nil { + base, _ := absPath(rootBase) + normalizedBase := normalizeAbsPath(base) + debugLog("setting root doc in cache at: %s", normalizedBase) + if cache == nil { + cache = resCache + } + cache.Set(normalizedBase, root) + return rootBase + } + return "" +} + +// ExpandSchema expands the refs in the schema object with reference to the root object +// go-openapi/validate uses this function +// notice that it is impossible to reference a json schema in a different file other than root +func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + // when a root is specified, cache the root as an in-memory document for $ref retrieval + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + return ExpandSchemaWithBasePath(schema, cache, opts) +} + +// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options +func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { + if schema == nil { + return nil + } + + var basePath string + if opts.RelativeBase != "" { + basePath, _ = absPath(opts.RelativeBase) + } + + resolver, err := defaultSchemaLoader(nil, opts, cache, nil) + if err != nil { + return err + } + + refs := []string{""} + var s *Schema + if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil { + return err + } + *schema = *s + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Items != nil { + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + *target.Items.Schema = *t 
+ } + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + } + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { + if target.Ref.String() == "" && target.Ref.IsRoot() { + // normalizing is important + newRef := normalizeFileRef(&target.Ref, basePath) + target.Ref = *newRef + return &target, nil + + } + + // change the base path of resolution when an ID is encountered + // otherwise the basePath should inherit the parent's + // important: ID can be relative path + if target.ID != "" { + debugLog("schema has ID: %s", target.ID) + // handling the case when id is a folder + // remember that basePath has to be a file + refPath := target.ID + if strings.HasSuffix(target.ID, "/") { + // path.Clean here would not work correctly if basepath is http + refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json") + } + basePath = normalizePaths(refPath, basePath) + } + + var t *Schema + // if Ref is found, everything else doesn't matter + // Ref also changes the resolution scope of children expandSchema + if target.Ref.String() != "" { + // here the resolution scope is changed because a $ref was encountered + normalizedRef := normalizeFileRef(&target.Ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if resolver.isCircular(normalizedRef, basePath, parentRefs...) { + // this means there is a cycle in the recursion tree: return the Ref + // - circular refs cannot be expanded. We leave them as ref. + // - denormalization means that a new local file ref is set relative to the original basePath + debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", + basePath, normalizedBasePath, normalizedRef.String()) + if !resolver.options.AbsoluteCircularRef { + target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) + } else { + target.Ref = *normalizedRef + } + return &target, nil + } + + debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target) + if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) { + return nil, err + } + + if t != nil { + parentRefs = append(parentRefs, normalizedRef.String()) + var err error + transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref) + if transitiveResolver.shouldStopOnError(err) { + return nil, err + } + + basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) + + return expandSchema(*t, parentRefs, transitiveResolver, basePath) + } + } + + t, err := expandItems(target, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target = *t + } + + for i := range target.AllOf { + t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + target.AllOf[i] = *t + } + for i := range target.AnyOf { + t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + target.AnyOf[i] = *t + } + for i := range target.OneOf { + t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.OneOf[i] = *t + } + } + if target.Not != nil { + t, err := expandSchema(*target.Not, 
parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Not = *t + } + } + for k := range target.Properties { + t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Properties[k] = *t + } + } + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalProperties.Schema = *t + } + } + for k := range target.PatternProperties { + t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.PatternProperties[k] = *t + } + } + for k := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.Dependencies[k].Schema = *t + } + } + } + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + *target.AdditionalItems.Schema = *t + } + } + for k := range target.Definitions { + t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return &target, err + } + if t != nil { + target.Definitions[k] = *t + } + } + return &target, nil +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { + if pathItem == nil { + return nil + } + + parentRefs := []string{} + if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + if pathItem.Ref.String() != "" { + var err error + resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref) + if resolver.shouldStopOnError(err) { + return err + } + } + pathItem.Ref = Ref{} + + for idx := range pathItem.Parameters { + if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + ops := []*Operation{ + pathItem.Get, + pathItem.Head, + pathItem.Options, + pathItem.Put, + pathItem.Post, + pathItem.Patch, + pathItem.Delete, + } + for _, op := range ops { + if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + } + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { + if op == nil { + return nil + } + + for i := range op.Parameters { + param := op.Parameters[i] + if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + op.Parameters[i] = param + } + + if op.Responses != nil { + responses := op.Responses + if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + for code := range responses.StatusCodeResponses { + response := responses.StatusCodeResponses[code] + if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { + return err + } + responses.StatusCodeResponses[code] = response + } + 
} + return nil +} + +// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document +func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandResponse expands a response based on a basepath +// This is the exported version of expandResponse +// all refs inside response will be resolved relative to basePath +func ExpandResponse(response *Response, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } + opts := &ExpandOptions{ + RelativeBase: specBasePath, + } + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(response, resolver, opts.RelativeBase) +} + +// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document +func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { + opts := &ExpandOptions{ + RelativeBase: baseForRoot(root, cache), + SkipSchemas: false, + ContinueOnError: false, + // when no base path is specified, remaining $ref (circular) are rendered with an absolute path + AbsoluteCircularRef: true, + } + resolver, err := defaultSchemaLoader(root, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +// ExpandParameter expands a parameter based on a basepath. +// This is the exported version of expandParameter +// all refs inside parameter will be resolved relative to basePath +func ExpandParameter(parameter *Parameter, basePath string) error { + var specBasePath string + if basePath != "" { + specBasePath, _ = absPath(basePath) + } + opts := &ExpandOptions{ + RelativeBase: specBasePath, + } + resolver, err := defaultSchemaLoader(nil, opts, nil, nil) + if err != nil { + return err + } + + return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) +} + +func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { + var ref *Ref + var sch *Schema + switch refable := input.(type) { + case *Parameter: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + case *Response: + if refable == nil { + return nil, nil, nil + } + ref = &refable.Ref + sch = refable.Schema + default: + return nil, nil, fmt.Errorf("expand: unsupported type %T. 
Input should be of type *Parameter or *Response", input) + } + return ref, sch, nil +} + +func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { + ref, _, err := getRefAndSchema(input) + if err != nil { + return err + } + if ref == nil { + return nil + } + parentRefs := []string{} + if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + ref, sch, _ := getRefAndSchema(input) + if ref.String() != "" { + transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) + if transitiveResolver.shouldStopOnError(err) { + return err + } + basePath = resolver.updateBasePath(transitiveResolver, basePath) + resolver = transitiveResolver + } + + if sch != nil && sch.Ref.String() != "" { + // schema expanded to a $ref in another root + var ern error + sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) + if ern != nil { + return ern + } + } + if ref != nil { + *ref = Ref{} + } + + if !resolver.options.SkipSchemas && sch != nil { + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + *sch = *s + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 000000000000..88add91b2b8b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. 
+// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod new file mode 100644 index 000000000000..42073be00755 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.mod @@ -0,0 +1,14 @@ +module github.com/go-openapi/spec + +require ( + github.com/go-openapi/jsonpointer v0.19.2 + github.com/go-openapi/jsonreference v0.19.2 + github.com/go-openapi/swag v0.19.2 + github.com/kr/pty v1.1.5 // indirect + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect + golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect + golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum new file mode 100644 index 000000000000..73e97a2d73e1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/go.sum @@ -0,0 +1,66 @@ +github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= +github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go
new file mode 100644
index 000000000000..39efe452bb09
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/header.go
@@ -0,0 +1,197 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+const (
+	jsonArray = "array"
+)
+
+// HeaderProps describes a response header
+type HeaderProps struct {
+	Description string `json:"description,omitempty"`
+}
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+	CommonValidations
+	SimpleSchema
+	VendorExtensible
+	HeaderProps
+}
+
+// ResponseHeader creates a new header instance for use in a response
+func ResponseHeader() *Header {
+	return new(Header)
+}
+
+// WithDescription sets the description on this header, allows for chaining
+func (h *Header) WithDescription(description string) *Header {
+	h.Description = description
+	return h
+}
+
+// Typed a fluent builder method for the type of this header
+func (h *Header) Typed(tpe, format string) *Header {
+	h.Type = tpe
+	h.Format = format
+	return h
+}
+
+// CollectionOf a fluent builder method for an array item
+func (h *Header) CollectionOf(items *Items, format string) *Header {
+	h.Type = jsonArray
+	h.Items = items
+	h.CollectionFormat = format
+	return h
+}
+
+// WithDefault sets the default value on this item
+func (h *Header) WithDefault(defaultValue interface{}) *Header {
+	h.Default = defaultValue
+	return h
+}
+
+// WithMaxLength sets a max length value
+func (h *Header) WithMaxLength(max int64) *Header {
+	h.MaxLength = &max
+	return h
+}
+
+// WithMinLength sets a min length value
+func (h *Header) WithMinLength(min int64) *Header {
+	h.MinLength = &min
+	return h
+}
+
+// WithPattern sets a pattern value
+func (h *Header) WithPattern(pattern string) *Header {
+	h.Pattern = pattern
+	return h
+}
+
+// WithMultipleOf sets a multiple of value
+func (h *Header) WithMultipleOf(number float64) *Header {
+	h.MultipleOf = &number
+	return h
+}
+
+// WithMaximum sets a maximum number value
+func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
+	h.Maximum = &max
+	h.ExclusiveMaximum = exclusive
+	return h
+}
+
+// WithMinimum sets a minimum number value
+func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
+	h.Minimum = &min
+	h.ExclusiveMinimum = exclusive
+	return h
+}
+
+// WithEnum sets the enum values (replace)
+func (h *Header) WithEnum(values 
...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) + return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON unmarshals this header from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &h.HeaderProps) +} + +// JSONLookup look up a value by the json property name +func (h Header) JSONLookup(token string) (interface{}, error) { + if ex, ok := h.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 000000000000..c458b49b216a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
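The header.go file above gives Header a small fluent builder API. The following is an illustrative sketch, not part of the vendored diff (the header name and values are invented), showing how a response header could be assembled and serialized:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An int32 rate-limit header with a lower bound of zero.
	h := spec.ResponseHeader().
		WithDescription("remaining requests in the current window").
		Typed("integer", "int32").
		WithMinimum(0, false)

	b, err := json.Marshal(h)
	if err != nil {
		panic(err)
	}
	// Prints roughly: {"minimum":0,"type":"integer","format":"int32","description":"..."}
	fmt.Println(string(b))
}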
+ +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetBool gets a bool value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string slice from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, isSlice := v.([]interface{}) + if !isSlice { + return nil, false + } + var strs []string + for _, iface := range arr { + str, isString := iface.(string) + if !isString { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. +type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
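+//
+// Example JSON (illustrative, not from the upstream sources):
+//
+//	{"title": "Pet Store", "version": "1.0.0", "x-audience": "public"}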
+// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + return json.Unmarshal(data, &i.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 000000000000..365d1631582a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,244 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + jsonRef = "$ref" +) + +// SimpleSchema describe swagger simple schemas for parameters and headers +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// TypeName return the type (or format) of a simple schema +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +// ItemsTypeName yields the type of items in a simple schema array +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +// CommonValidations describe common JSON-schema validations +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body".
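+//
+// Example (an illustrative sketch, not part of the upstream file): a
+// CSV-formatted array of int32 values can be declared with the builders below:
+//
+//	spec.NewItems().CollectionOf(spec.NewItems().Typed("integer", "int32"), "csv")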
+// +// For more information: http://goo.gl/8us55a#items-object +type Items struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// AsNullable flags this schema as nullable. +func (i *Items) AsNullable() *Items { + i.Nullable = true + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = jsonArray + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...)
+ return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + var vendorExtensible VendorExtensible + if err := json.Unmarshal(data, &vendorExtensible); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + i.VendorExtensible = vendorExtensible + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b4, b3, b1, b2), nil +} + +// JSONLookup look up a value by the json property name +func (i Items) JSONLookup(token string) (interface{}, error) { + if token == jsonRef { + return &i.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) + return r, err +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 000000000000..f20961b4fd8b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// License information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go new file mode 100644 index 000000000000..b8957e7c0c18 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -0,0 +1,152 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "strings" ) + +// normalize absolute path for cache. +// on Windows, drive letters should be converted to lower case, as the scheme is in net/url.URL +func normalizeAbsPath(path string) string { + u, err := url.Parse(path) + if err != nil { + debugLog("normalize absolute path failed: %s", err) + return path + } + return u.String() +} + +// base or refPath could be a file path or a URL +// given a base absolute path and a ref path, return the absolute path of refPath +// 1) if refPath is absolute, return it +// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if they exist +// base could be a directory or a full file path +func normalizePaths(refPath, base string) string { + refURL, _ := url.Parse(refPath) + if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { + // refPath is actually absolute + if refURL.Host != "" { + return refPath + } + parts := strings.Split(refPath, "#") + result := filepath.FromSlash(parts[0]) + if len(parts) == 2 { + result += "#" + parts[1] + } + return result + } + + // relative refPath + baseURL, _ := url.Parse(base) + if !strings.HasPrefix(refPath, "#") { + // combining paths + if baseURL.Host != "" { + baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) + } else { // base is a file + newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) + return newBase + } + + } + // copying fragment from ref to base + baseURL.Fragment = refURL.Fragment + return baseURL.String() +} + +// denormalizeFileRef returns the simplest notation for a file $ref, +// i.e. strips the absolute path and sets a path relative to the base path. +// +// This is currently used when we rewrite a ref after a circular ref has been detected +func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { + debugLog("denormalizeFileRef for: %s", ref.String()) + + if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { + return ref + } + // strip relativeBase from URI + relativeBaseURL, _ := url.Parse(relativeBase) + relativeBaseURL.Fragment = "" + + if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { + // this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix + r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) + return &r + } + + if relativeBaseURL.IsAbs() { + // other absolute URLs are left unchanged (i.e.
with a non-empty scheme) + return ref + } + + // for relative file URIs: + originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL.Fragment = "" + if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { + // the resulting ref is in the expanded spec: return a local ref + r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) + return &r + } + + // check if we may set a relative path, considering the original base path for this spec. + // Example: + // spec is located at /mypath/spec.json + // my normalized ref points to: /mypath/item.json#/target + // expected result: item.json#/target + parts := strings.Split(ref.String(), "#") + relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) + if err != nil { + // there is no common ancestor (e.g. different drives on windows) + // leaves the ref unchanged + return ref + } + if len(parts) == 2 { + relativePath += "#" + parts[1] + } + r, _ := NewRef(relativePath) + return &r +} + +// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL +func normalizeFileRef(ref *Ref, relativeBase string) *Ref { + // This is important for when the reference is pointing to the root schema + if ref.String() == "" { + r, _ := NewRef(relativeBase) + return &r + } + + debugLog("normalizing %s against %s", ref.String(), relativeBase) + + s := normalizePaths(ref.String(), relativeBase) + r, _ := NewRef(s) + return &r +} + +// absPath returns the absolute path of a file +func absPath(fname string) (string, error) { + if strings.HasPrefix(fname, "http") { + return fname, nil + } + if filepath.IsAbs(fname) { + return fname, nil + } + wd, err := os.Getwd() + return filepath.Join(wd, fname), err +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 000000000000..b1ebd59945b7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,398 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
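The path handling in normalizer.go is easiest to see with concrete values. The following is a hedged sketch of expected normalizePaths behaviour on a Unix-like system, derived from the three rules documented in its comment; since the function is unexported it is written as an in-package test, and it is not part of the vendored code:

package spec

import "testing"

// TestNormalizePathsSketch is illustrative only: expected behaviour of
// normalizePaths following the rules documented above.
func TestNormalizePathsSketch(t *testing.T) {
	cases := []struct{ ref, base, want string }{
		// fragment-only refs stay on the base document
		{"#/definitions/Pet", "/specs/spec.json", "/specs/spec.json#/definitions/Pet"},
		// relative file refs are joined with the base directory
		{"item.json#/target", "/specs/spec.json", "/specs/item.json#/target"},
		// absolute refs are returned as-is
		{"/other/item.json", "http://host/specs/spec.json", "/other/item.json"},
	}
	for _, c := range cases {
		if got := normalizePaths(c.ref, c.base); got != c.want {
			t.Errorf("normalizePaths(%q, %q) = %q, want %q", c.ref, c.base, got, c.want)
		}
	}
}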
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "sort" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +func init() { + //gob.Register(map[string][]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) +} + +// OperationProps describes an operation +// +// NOTES: +// - schemes, when present, must be from [http, https, ws, wss]: see validate +// - Security is handled as a special case: see MarshalJSON function +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// MarshalJSON takes care of serializing operation properties to JSON +// +// We use a custom marshaller here to handle special cases related to +// the Security field. We need to preserve a zero-length slice +// while omitting the field when the value is nil/unset. +func (op OperationProps) MarshalJSON() ([]byte, error) { + type Alias OperationProps + if op.Security == nil { + return json.Marshal(&struct { + Security []map[string][]string `json:"security,omitempty"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) + } + return json.Marshal(&struct { + Security []map[string][]string `json:"security"` + *Alias + }{ + Security: op.Security, + Alias: (*Alias)(&op), + }) +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) + for k := range o.Responses.StatusCodeResponses { + if k >= 200 && k < 300 { + responseCodes = append(responseCodes, k) + } + } + if len(responseCodes) > 0 { + sort.Ints(responseCodes) + v := o.Responses.StatusCodeResponses[responseCodes[0]] + return &v, responseCodes[0], true + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + return json.Unmarshal(data, &o.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance.
+// It expects an ID as a parameter, but not passing an ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params, the external documents will be removed. +// When you pass a non-empty string for one value, those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprecated +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation; when a parameter for that location +// and with that name already exists, it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := append(o.Parameters[:i], *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == in { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation.
+// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} + +type opsAlias OperationProps + +type gobAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *opsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (o Operation) GobEncode() ([]byte, error) { + raw := struct { + Ext VendorExtensible + Props OperationProps + }{ + Ext: o.VendorExtensible, + Props: o.OperationProps, + } + var b bytes.Buffer + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (o *Operation) GobDecode(b []byte) error { + var raw struct { + Ext VendorExtensible + Props OperationProps + } + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + o.VendorExtensible = raw.Ext + o.OperationProps = raw.Props + return nil +} + +// GobEncode provides a safe gob encoder for Operation, including empty security requirements +func (op OperationProps) GobEncode() ([]byte, error) { + raw := gobAlias{ + Alias: (*opsAlias)(&op), + } + + var b bytes.Buffer + if op.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(op.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(op.Security)) + for _, req := range op.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Operation, including empty security requirements +func (op *OperationProps) GobDecode(b []byte) error { + var raw gobAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) 
+ } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *op = *(*OperationProps)(raw.Alias) + return nil +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 000000000000..cecdff54568d --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,321 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter; this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter; this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, + SimpleSchema: SimpleSchema{Type: "object"}} +} + +// FormDataParam creates a form data parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a file parameter (in formData) +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, + SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, + SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", + Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +// ParamProps describes the specific attributes of an operation parameter +// +// NOTE: +// - Schema is defined when "in" == "body": see validate +// - AllowEmptyValue is allowed where "in" == "query" || "formData" +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types.
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
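+//
+// As an illustrative sketch (not part of the upstream file), the package's
+// constructors map onto these types as follows:
+//
+//	spec.PathParam("itemId")                           // path, always required
+//	spec.QueryParam("limit").Typed("integer", "int32") // query
+//	spec.FileParam("upload")                           // formData, type "file"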
+// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = jsonArray + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false + return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.ParamProps) +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 000000000000..68fc8e90144e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,87 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// PathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == jsonRef { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + return json.Unmarshal(data, &p.PathItemProps) +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 000000000000..9dc82a2901d6 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
+// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 000000000000..08ff869b2fcd --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,191 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
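Because Paths flattens its map into the enclosing JSON object, the custom (un)marshalling above is worth a quick illustration. A minimal sketch, with a hypothetical document literal:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Keys starting with "/" become entries in Paths.Paths; keys starting
	// with "x-" land in the vendor extensions.
	doc := []byte(`{"x-framework":"go-swagger","/pets":{"get":{"operationId":"listPets"}}}`)

	var p spec.Paths
	if err := json.Unmarshal(doc, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Paths["/pets"].Get.ID)                // listPets
	fmt.Println(p.Extensions.GetString("x-framework")) // go-swagger true
}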
+ +package spec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshals the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return r.String() + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI(basepaths ...string) bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + rr, err := http.Get(v) + if err != nil { + return false + } + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + base := "." + if len(basepaths) > 0 { + base = filepath.Dir(filepath.Join(basepaths...)) + } + p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(filepath.ToSlash(pth)) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but panics when refURI is invalid. +// Use the NewRef method for a version that returns an error.
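+//
+// Example (illustrative):
+//
+//	ref := spec.MustCreateRef("#/definitions/Pet")
+//	_ = ref.String() // "#/definitions/Pet"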
+func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":""}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + return r.fromMap(v) +} + +// GobEncode provides a safe gob encoder for Ref +func (r Ref) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw, err := r.MarshalJSON() + if err != nil { + return nil, err + } + err = gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Ref +func (r *Ref) GobDecode(b []byte) error { + var raw []byte + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + return json.Unmarshal(raw, r) +} + +func (r *Ref) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 000000000000..27729c1d93b1 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,131 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. 
+// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps + VendorExtensible +} + +// JSONLookup look up a value by the json property name +func (r Response) JSONLookup(token string) (interface{}, error) { + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &r.Ref, nil + } + ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) + return ptr, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return json.Unmarshal(data, &r.VendorExtensible) +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + b3, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. +// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example interface{}) *Response { + if r.Examples == nil { + r.Examples = make(map[string]interface{}) + } + r.Examples[mediaType] = example + return r +} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 000000000000..4efb6f868bd0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,127 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
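response.go above is again a fluent builder API. As a small illustrative sketch (the description, header name, and use of StringProperty from schema.go later in this diff are invented for the example):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A response with a string body and a request-id header.
	ok := spec.NewResponse().
		WithDescription("the resource name").
		WithSchema(spec.StringProperty()).
		AddHeader("X-Request-Id", spec.ResponseHeader().Typed("string", ""))

	b, err := json.Marshal(ok)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}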
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// The documentation is not expected to necessarily cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+	VendorExtensible
+	ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (r Responses) JSONLookup(token string) (interface{}, error) {
+	if token == "default" {
+		return r.Default, nil
+	}
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if i, err := strconv.Atoi(token); err == nil {
+		if scr, ok := r.StatusCodeResponses[i]; ok {
+			return scr, nil
+		}
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this responses instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+		return err
+	}
+	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+		r.ResponsesProps = ResponsesProps{}
+	}
+	return nil
+}
+
+// MarshalJSON converts this responses object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(r.ResponsesProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It tells what the default response is and maps all responses with an
+// HTTP status code.
+type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +// MarshalJSON marshals responses as JSON +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +// UnmarshalJSON unmarshals responses from JSON +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]Response + if err := json.Unmarshal(data, &res); err != nil { + return nil + } + if v, ok := res["default"]; ok { + r.Default = &v + delete(res, "default") + } + for k, v := range res { + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = v + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 000000000000..37858ece9098 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,596 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format 
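To see the `default`/status-code mapping from responses.go above in action, a hedged sketch; the status code and descriptions are made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	responses := &spec.Responses{
		ResponsesProps: spec.ResponsesProps{
			Default: spec.NewResponse().WithDescription("unexpected error"),
			StatusCodeResponses: map[int]spec.Response{
				200: *spec.NewResponse().WithDescription("OK"),
			},
		},
	}

	// Status codes are serialized via strconv.Itoa; the default response
	// lands under the literal "default" key.
	b, err := json.Marshal(responses)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"200":{"description":"OK"},"default":{"description":"unexpected error"}}
}
```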
+func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, + AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + return r.fromMap(v) +} + +func (r *SchemaURL) fromMap(v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := url.Parse(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// SchemaProps describes a JSON schema (draft 4) +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema 
`json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. +// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return 
s
+}
+
+// AddType adds a type with potential format to the types for this schema
+func (s *Schema) AddType(tpe, format string) *Schema {
+	s.Type = append(s.Type, tpe)
+	if format != "" {
+		s.Format = format
+	}
+	return s
+}
+
+// AsNullable flags this schema as nullable.
+func (s *Schema) AsNullable() *Schema {
+	s.Nullable = true
+	return s
+}
+
+// CollectionOf is a fluent builder method for an array parameter
+func (s *Schema) CollectionOf(items Schema) *Schema {
+	s.Type = []string{jsonArray}
+	s.Items = &SchemaOrArray{Schema: &items}
+	return s
+}
+
+// WithDefault sets the default value on this parameter
+func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
+	s.Default = defaultValue
+	return s
+}
+
+// WithRequired flags this parameter as required
+func (s *Schema) WithRequired(items ...string) *Schema {
+	s.Required = items
+	return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+	s.Required = append(s.Required, items...)
+	return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+	s.MaxLength = &max
+	return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+	s.MinLength = &min
+	return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+	s.Pattern = pattern
+	return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+	s.MultipleOf = &number
+	return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+	s.Maximum = &max
+	s.ExclusiveMaximum = exclusive
+	return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+	s.Minimum = &min
+	s.ExclusiveMinimum = exclusive
+	return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+	s.Enum = append([]interface{}{}, values...)
+	return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+	s.MaxItems = &size
+	return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+	s.MinItems = &size
+	return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+	s.UniqueItems = true
+	return s
+}
+
+// AllowDuplicates dictates that this array can have duplicate items
+func (s *Schema) AllowDuplicates() *Schema {
+	s.UniqueItems = false
+	return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+	s.AllOf = append(s.AllOf, schemas...)
+	return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+	s.Discriminator = discriminator
+	return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+	s.ReadOnly = true
+	return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+	s.ReadOnly = false
+	return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+	s.Example = example
+	return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. +func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + props := struct { + SchemaProps + SwaggerSchemaProps + }{} + if err := json.Unmarshal(data, &props); err != nil { + return err + } + + sch := Schema{ + SchemaProps: props.SchemaProps, + SwaggerSchemaProps: props.SwaggerSchemaProps, + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + _ = sch.Ref.fromMap(d) + _ = sch.Schema.fromMap(d) + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + 
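+			// lk is the lowercased key: anything prefixed with "x-" (matched
+			// case-insensitively) is collected as a vendor extension under its
+			// original key; remaining unknown keys fall through to ExtraProps
+			// below, so unrecognized properties survive a round trip.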
if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go new file mode 100644 index 000000000000..c34a96fa04e7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -0,0 +1,275 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "log" + "net/url" + "reflect" + "strings" + + "github.com/go-openapi/swag" +) + +// PathLoader function to use when loading remote refs +var PathLoader func(string) (json.RawMessage, error) + +func init() { + PathLoader = func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + } +} + +// resolverContext allows to share a context during spec processing. +// At the moment, it just holds the index of circular references found. +type resolverContext struct { + // circulars holds all visited circular references, which allows shortcuts. + // NOTE: this is not just a performance improvement: it is required to figure out + // circular references which participate several cycles. + // This structure is privately instantiated and needs not be locked against + // concurrent access, unless we chose to implement a parallel spec walking. 
+ circulars map[string]bool + basePath string +} + +func newResolverContext(originalBasePath string) *resolverContext { + return &resolverContext{ + circulars: make(map[string]bool), + basePath: originalBasePath, // keep the root base path in context + } +} + +type schemaLoader struct { + root interface{} + options *ExpandOptions + cache ResolutionCache + context *resolverContext + loadDoc func(string) (json.RawMessage, error) +} + +func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return r, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + if strings.HasPrefix(currentRef.String(), baseRef.String()) { + return r, nil + } + + // Set a new root to resolve against + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := r.cache.Get(rootURL.String()) + + // shallow copy of resolver options to set a new RelativeBase when + // traversing multiple documents + newOptions := r.options + newOptions.RelativeBase = rootURL.String() + debugLog("setting new root: %s", newOptions.RelativeBase) + resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) + if err != nil { + return nil, err + } + + return resolver, nil +} + +func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { + if transitive != r { + debugLog("got a new resolver") + if transitive.options != nil && transitive.options.RelativeBase != "" { + basePath, _ = absPath(transitive.options.RelativeBase) + debugLog("new basePath = %s", basePath) + } + } + return basePath +} + +func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + refURL := ref.GetURL() + if refURL == nil { + return nil + } + + var res interface{} + var data interface{} + var err error + // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means + // it is pointing somewhere in the root. + root := r.root + if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { + if baseRef, erb := NewRef(basePath); erb == nil { + root, _, _, _ = r.load(baseRef.GetURL()) + } + } + if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { + data = root + } else { + baseRef := normalizeFileRef(ref, basePath) + debugLog("current ref is: %s", ref.String()) + debugLog("current ref normalized file: %s", baseRef.String()) + data, _, _, err = r.load(baseRef.GetURL()) + if err != nil { + return err + } + } + + res = data + if ref.String() != "" { + res, _, err = ref.GetPointer().Get(data) + if err != nil { + return err + } + } + return swag.DynamicJSONToStruct(res, target) +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + debugLog("loading schema from url: %s", refURL) + toFetch := *refURL + toFetch.Fragment = "" + + normalized := normalizeAbsPath(toFetch.String()) + + data, fromCache := r.cache.Get(normalized) + if !fromCache { + b, err := r.loadDoc(normalized) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(normalized, data) + } + + return data, toFetch, fromCache, nil +} + +// isCircular detects cycles in sequences of $ref. +// It relies on a private context (which needs not be locked). 
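Since `PathLoader` above is an exported package variable, callers can swap in their own document fetcher before any resolution happens; remote fetches in the resolver funnel through `loadDoc`, which defaults to `PathLoader`. A hedged sketch serving documents from memory, with hypothetical paths and payloads:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Hypothetical pre-loaded documents, keyed by the path the loader receives.
	docs := map[string]string{
		"/tmp/user.json": `{"type":"object","properties":{"id":{"type":"integer"}}}`,
	}

	// Overriding PathLoader redirects every document fetch, including the
	// ones triggered by $ref resolution.
	spec.PathLoader = func(path string) (json.RawMessage, error) {
		if doc, ok := docs[path]; ok {
			return json.RawMessage(doc), nil
		}
		return nil, fmt.Errorf("document %q not found", path)
	}

	raw, err := spec.PathLoader("/tmp/user.json")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}
```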
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { + normalizedRef := normalizePaths(ref.String(), basePath) + if _, ok := r.context.circulars[normalizedRef]; ok { + // circular $ref has been already detected in another explored cycle + foundCycle = true + return + } + foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) + if foundCycle { + r.context.circulars[normalizedRef] = true + } + return +} + +// Resolve resolves a reference against basePath and stores the result in target +// Resolve is not in charge of following references, it only resolves ref by following its URL +// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them +// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct +func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { + return r.resolveRef(ref, target, basePath) +} + +func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { + var ref *Ref + switch refable := input.(type) { + case *Schema: + ref = &refable.Ref + case *Parameter: + ref = &refable.Ref + case *Response: + ref = &refable.Ref + case *PathItem: + ref = &refable.Ref + default: + return fmt.Errorf("deref: unsupported type %T", input) + } + + curRef := ref.String() + if curRef != "" { + normalizedRef := normalizeFileRef(ref, basePath) + normalizedBasePath := normalizedRef.RemoteURI() + + if r.isCircular(normalizedRef, basePath, parentRefs...) { + return nil + } + + if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { + return err + } + + // NOTE(fredbi): removed basePath check => needs more testing + if ref.String() != "" && ref.String() != curRef { + parentRefs = append(parentRefs, normalizedRef.String()) + return r.deref(input, parentRefs, normalizedBasePath) + } + } + + return nil +} + +func (r *schemaLoader) shouldStopOnError(err error) bool { + if err != nil && !r.options.ContinueOnError { + return true + } + + if err != nil { + log.Println(err) + } + + return false +} + +func defaultSchemaLoader( + root interface{}, + expandOptions *ExpandOptions, + cache ResolutionCache, + context *resolverContext) (*schemaLoader, error) { + + if cache == nil { + cache = resCache + } + if expandOptions == nil { + expandOptions = &ExpandOptions{} + } + absBase, _ := absPath(expandOptions.RelativeBase) + if context == nil { + context = newResolverContext(absBase) + } + return &schemaLoader{ + root: root, + options: expandOptions, + cache: cache, + context: context, + loadDoc: func(path string) (json.RawMessage, error) { + debugLog("fetching document at %q", path) + return PathLoader(path) + }, + }, nil +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 000000000000..fe353842a6fc --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,140 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. +// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). 
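The constructors above cover the Swagger 2.0 scheme types; a short sketch combining two of them, with invented endpoints and scope names:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An API key carried in a header: the field name plus its location
	// ("header" or "query").
	key := spec.APIKeyAuth("X-API-Key", "header")

	// The access-code flow needs both endpoints; scopes are attached afterwards.
	oauth := spec.OAuth2AccessToken(
		"https://auth.example.com/authorize", // hypothetical endpoints
		"https://auth.example.com/token",
	)
	oauth.AddScope("read:users", "read access to user records")

	for _, s := range []*spec.SecurityScheme{key, oauth} {
		b, err := json.Marshal(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	}
}
```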
+// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + return json.Unmarshal(data, &s.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 000000000000..0bb045bc06ad --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,86 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "encoding/json" + +//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json +//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+	// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+	SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+	// JSONSchemaURL the url for the json schema schema
+	JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+var (
+	jsonSchema    *Schema
+	swaggerSchema *Schema
+)
+
+func init() {
+	jsonSchema = MustLoadJSONSchemaDraft04()
+	swaggerSchema = MustLoadSwagger20Schema()
+}
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+	d, e := JSONSchemaDraft04()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json schema draft 04
+func JSONSchemaDraft04() (*Schema, error) {
+	b, err := Asset("jsonschema-draft-04.json")
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+	d, e := Swagger20Schema()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+
+	b, err := Asset("v2/schema.json")
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 000000000000..44722ffd5adc
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Swagger is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
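As for spec.go above: `init()` relies on the panicking `Must*` variants, while the error-returning variants are usually preferable outside initialization. A minimal sketch; the printed field values depend on the embedded assets:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Error-returning flavor, suitable outside init().
	swagger20, err := spec.Swagger20Schema()
	if err != nil {
		panic(err)
	}
	fmt.Println(swagger20.ID)

	// Panicking flavor, as used by init() above.
	draft04 := spec.MustLoadJSONSchemaDraft04()
	fmt.Println(draft04.Schema)
}
```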
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +// GobEncode provides a safe gob encoder for Swagger, including extensions +func (s Swagger) GobEncode() ([]byte, error) { + var b bytes.Buffer + raw := struct { + Props SwaggerProps + Ext VendorExtensible + }{ + Props: s.SwaggerProps, + Ext: s.VendorExtensible, + } + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for Swagger, including extensions +func (s *Swagger) GobDecode(b []byte) error { + var raw struct { + Props SwaggerProps + Ext VendorExtensible + } + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + s.SwaggerProps = raw.Props + s.VendorExtensible = raw.Ext + return nil +} + +// SwaggerProps captures the top-level properties of an Api specification +// +// NOTE: validation rules +// - the scheme, when present must be from [http, https, ws, wss] +// - BasePath must start with a leading "/" +// - Paths is required +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` + Paths *Paths `json:"paths"` + Definitions Definitions `json:"definitions,omitempty"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +type swaggerPropsAlias SwaggerProps + +type gobSwaggerPropsAlias struct { + Security []map[string]struct { + List []string + Pad bool + } + Alias *swaggerPropsAlias + SecurityIsEmpty bool +} + +// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements +func (o SwaggerProps) GobEncode() ([]byte, error) { + raw := gobSwaggerPropsAlias{ + Alias: (*swaggerPropsAlias)(&o), + } + + var b bytes.Buffer + if o.Security == nil { + // nil security requirement + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err + } + + if len(o.Security) == 0 { + // empty, but non-nil security requirement + raw.SecurityIsEmpty = true + raw.Alias.Security = nil + err := gob.NewEncoder(&b).Encode(raw) 
+ return b.Bytes(), err + } + + raw.Security = make([]map[string]struct { + List []string + Pad bool + }, 0, len(o.Security)) + for _, req := range o.Security { + v := make(map[string]struct { + List []string + Pad bool + }, len(req)) + for k, val := range req { + v[k] = struct { + List []string + Pad bool + }{ + List: val, + } + } + raw.Security = append(raw.Security, v) + } + + err := gob.NewEncoder(&b).Encode(raw) + return b.Bytes(), err +} + +// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements +func (o *SwaggerProps) GobDecode(b []byte) error { + var raw gobSwaggerPropsAlias + + buf := bytes.NewBuffer(b) + err := gob.NewDecoder(buf).Decode(&raw) + if err != nil { + return err + } + if raw.Alias == nil { + return nil + } + + switch { + case raw.SecurityIsEmpty: + // empty, but non-nil security requirement + raw.Alias.Security = []map[string][]string{} + case len(raw.Alias.Security) == 0: + // nil security requirement + raw.Alias.Security = nil + default: + raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) + for _, req := range raw.Security { + v := make(map[string][]string, len(req)) + for k, val := range req { + v[k] = make([]string, 0, len(val.List)) + v[k] = append(v[k], val.List...) + } + raw.Alias.Security = append(raw.Alias.Security, v) + } + } + + *o = *(*SwaggerProps)(raw.Alias) + return nil +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) >= 4 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return []byte("null"), nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + 
} + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch v := single.(type) { + case string: + *s = StringOrArray([]string{v}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 000000000000..faa3d3de1eb4 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,75 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// TagProps describe a tag entry in the top level tags section of a swagger spec +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the +// [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
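The polymorphic containers above (StringOrArray, SchemaOrBool, SchemaOrArray) key their decoding off the first byte of the payload; a small sketch of the two most common shapes:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// StringOrArray accepts a bare string or an array; both end up as a slice.
	var single, many spec.StringOrArray
	if err := json.Unmarshal([]byte(`"string"`), &single); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`["string","null"]`), &many); err != nil {
		panic(err)
	}
	fmt.Println(single, many) // [string] [string null]

	// SchemaOrArray: '{' decodes into Schema, '[' into Schemas.
	var items spec.SchemaOrArray
	if err := json.Unmarshal([]byte(`[{"type":"string"},{"type":"integer"}]`), &items); err != nil {
		panic(err)
	}
	fmt.Println(items.Len()) // 2
}
```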
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go new file mode 100644 index 000000000000..aa12b56f6e49 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/unused.go @@ -0,0 +1,174 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +/* + +import ( + "net/url" + "os" + "path" + "path/filepath" + + "github.com/go-openapi/jsonpointer" +) + + // Some currently unused functions and definitions that + // used to be part of the expander. 
+ + // Moved here for the record and possible future reuse + +var ( + idPtr, _ = jsonpointer.New("/id") + refPtr, _ = jsonpointer.New("/$ref") +) + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + var rf Ref + switch value := refRef.(type) { + case string: + rf, _ = NewRef(value) + } + nw, err := ret.Inherits(rf) + if err != nil { + break + } + nwURL := nw.GetURL() + if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { + nwpt := filepath.ToSlash(nwURL.Path) + if filepath.IsAbs(nwpt) { + _, err := os.Stat(nwpt) + if err != nil { + nwURL.Path = filepath.Join(".", nwpt) + } + } + } + + ret = nw + } + + } + + return ret +} + +// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID +func basePathFromSchemaID(oldBasePath, id string) string { + u, err := url.Parse(oldBasePath) + if err != nil { + panic(err) + } + uid, err := url.Parse(id) + if err != nil { + panic(err) + } + + if path.IsAbs(uid.Path) { + return id + } + u.Path = path.Join(path.Dir(u.Path), uid.Path) + return u.String() +} +*/ + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 000000000000..945a46703d55 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. +// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig new file mode 100644 index 000000000000..3152da69a5d7 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.editorconfig @@ -0,0 +1,26 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true + +# Set default charset +[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] +charset = utf-8 + +# Tab indentation (no size specified) +[*.go] +indent_style = tab + +[*.md] +trim_trailing_whitespace = false + +# Matches the exact files either package.json or .travis.yml +[{package.json,.travis.yml}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore new file mode 100644 index 000000000000..dd91ed6a04e6 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml new file mode 100644 index 000000000000..f260ce7e5cd1 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -0,0 +1,30 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 31 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoinits + - gochecknoglobals + +issues: + exclude-rules: + - path: 
bson.go + text: "should be .*ObjectID" + linters: + - golint + diff --git a/vendor/github.com/go-openapi/strfmt/.travis.yml b/vendor/github.com/go-openapi/strfmt/.travis.yml new file mode 100644 index 000000000000..eb962aebcda8 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/.travis.yml @@ -0,0 +1,15 @@ +after_success: +- bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +language: go +env: +- GO111MODULE=on +notifications: + slack: + secure: zE5AtIYTpYfQPnTzP+EaQPN7JKtfFAGv6PrJqoIZLOXa8B6zGb6+J1JRNNxWi7faWbyJOxa4FSSsuPsKZMycUK6wlLFIdhDxwqeo7Ew8r6rdZKdfUHQggfNS9wO79ARoNYUDHtmnaBUS+eWSM1YqSc4i99QxyyfuURLOeAaA/q14YbdlTlaw3lrZ0qT92ot1FnVGNOx064zuHtFeUf+jAVRMZ6Q3rvqllwIlPszE6rmHGXBt2VoJxRaBetdwd7FgkcYw9FPXKHhadwC7/75ZAdmxIukhxNMw4Tr5NuPcqNcnbYLenDP7B3lssGVIrP4BRSqekS1d/tqvdvnnFWHMwrNCkSnSc065G5+qWTlXKAemIclgiXXqE2furBNLm05MDdG8fn5epS0UNarkjD+zX336RiqwBlOX4KbF+vPyqcO98CsN0lnd+H6loc9reiTHs37orFFpQ+309av9be2GGsHUsRB9ssIyrewmhAccOmkRtr2dVTZJNFQwa5Kph5TNJuTjnZEwG/xUkEX2YSfwShOsb062JWiflV6PJdnl80pc9Tn7D5sO5Bf9DbijGRJwwP+YiiJtwtr+vsvS+n4sM0b5eqm4UoRo+JJO8ffoJtHS7ItuyRbVQCwEPJ4221WLcf5PquEEDdAPwR+K4Gj8qTXqTDdxOiES1xFUKVgmzhI= +script: +- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..9322b065e37a --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/go-openapi/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md
new file mode 100644
index 000000000000..87357cd024c7
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/README.md
@@ -0,0 +1,73 @@
+# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
+[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
+
+This package exposes a registry of data types to support string formats in the go-openapi toolkit.
+
+strfmt represents a well-known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those.
+
+## Supported data formats
+go-openapi/strfmt follows the swagger 2.0 specification with the following formats
+defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types).
+
+It also provides convenient extensions for go-openapi users.
+
+- [x] JSON-schema draft 4 formats
+  - date-time
+  - email
+  - hostname
+  - ipv4
+  - ipv6
+  - uri
+- [x] swagger 2.0 format extensions
+  - binary
+  - byte (e.g. base64 encoded string)
+  - date (e.g. "1970-01-01")
+  - password
+- [x] go-openapi custom format extensions
+  - bsonobjectid (BSON objectID)
+  - creditcard
+  - duration (e.g. "3 weeks", "1ms")
+  - hexcolor (e.g. "#FFFFFF")
+  - isbn, isbn10, isbn13
+  - mac (e.g. "01:02:03:04:05:06")
+  - rgbcolor (e.g. "rgb(100,100,100)")
+  - ssn
+  - uuid, uuid3, uuid4, uuid5
+  - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32")
+
+> NOTE: as the name suggests, this package is intended to support string formatting only.
+> It does not provide validation for numerical values with the swagger format extension for JSON types "number" or
+> "integer" (e.g. float, double, int32...).
+
+## Format types
+Types defined in strfmt expose marshaling and validation capabilities.
+
+List of defined types:
+- Base64
+- CreditCard
+- Date
+- DateTime
+- Duration
+- Email
+- HexColor
+- Hostname
+- IPv4
+- IPv6
+- CIDR
+- ISBN
+- ISBN10
+- ISBN13
+- MAC
+- ObjectId
+- Password
+- RGBColor
+- SSN
+- URI
+- UUID
+- UUID3
+- UUID4
+- UUID5
diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go
new file mode 100644
index 000000000000..c14961221825
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/bson.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"fmt"
+
+	"go.mongodb.org/mongo-driver/bson"
+
+	"go.mongodb.org/mongo-driver/bson/bsontype"
+	bsonprim "go.mongodb.org/mongo-driver/bson/primitive"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+	_, err := bsonprim.ObjectIDFromHex(str)
+	return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID
+
+// NewObjectId creates an ObjectId from a hex string
+func NewObjectId(hex string) ObjectId {
+	oid, err := bsonprim.ObjectIDFromHex(hex)
+	if err != nil {
+		panic(err)
+	}
+	return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+	oid := bsonprim.ObjectID(id)
+	if oid == bsonprim.NilObjectID {
+		return nil, nil
+	}
+	return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	if len(data) == 0 {
+		*id = ObjectId(bsonprim.NilObjectID)
+		return nil
+	}
+	oidstr := string(data)
+	oid, err := bsonprim.ObjectIDFromHex(oidstr)
+	if err != nil {
+		return err
+	}
+	*id = ObjectId(oid)
+	return nil
+}
+
+// Scan reads a value from a database driver
+func (id *ObjectId) Scan(raw interface{}) error {
+	var data []byte
+	switch v := raw.(type) {
+	case []byte:
+		data = v
+	case string:
+		data = []byte(v)
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.ObjectId from: %#v", v)
+	}
+
+	return id.UnmarshalText(data)
+}
+
+// Value converts a value to a database driver value
+func (id ObjectId) Value() (driver.Value, error) {
+	return driver.Value(bsonprim.ObjectID(id).Hex()), nil
+}
+
+// String renders this ObjectId as a string
+func (id ObjectId) String() string {
+	return bsonprim.ObjectID(id).String()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	var obj bsonprim.ObjectID
+	if err := obj.UnmarshalJSON(data); err != nil {
+		return err
+	}
+	*id = ObjectId(obj)
+	return nil
+}
+
+// MarshalBSON renders the object id as a BSON document
+func (id ObjectId) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)})
+}
+
+// UnmarshalBSON reads the objectId from a BSON document
+func (id *ObjectId) UnmarshalBSON(data []byte) error {
+	var obj struct {
+		Data bsonprim.ObjectID
+	}
+	if err := bson.Unmarshal(data, &obj); err != nil {
+		return err
+	}
+	*id = ObjectId(obj.Data)
+	return nil
+}
+
+// MarshalBSONValue is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	oid := bsonprim.ObjectID(id)
+	return bsontype.ObjectID, oid[:], nil
+}
+
+// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
+	var oid bsonprim.ObjectID
+	copy(oid[:], data)
+	*id = ObjectId(oid)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go
new file mode 100644
index 000000000000..2959e6573038
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/date.go
@@ -0,0 +1,153 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.Parse(RFC3339FullDate, string(text))
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// Scan scans a Date value from a database driver type.
+func (d *Date) Scan(raw interface{}) error {
+	switch v := raw.(type) {
+	case []byte:
+		return d.UnmarshalText(v)
+	case string:
+		return d.UnmarshalText([]byte(v))
+	case time.Time:
+		*d = Date(v)
+		return nil
+	case nil:
+		*d = Date{}
+		return nil
+	default:
+		return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v)
+	}
+}
+
+// Value converts Date to a primitive value ready to be written to a database.
+func (d Date) Value() (driver.Value, error) {
+	return driver.Value(d.String()), nil
+}
+
+// MarshalJSON returns the Date as JSON
+func (d Date) MarshalJSON() ([]byte, error) {
+	return json.Marshal(time.Time(d).Format(RFC3339FullDate))
+}
+
+// UnmarshalJSON sets the Date from JSON
+func (d *Date) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	var strdate string
+	if err := json.Unmarshal(data, &strdate); err != nil {
+		return err
+	}
+	tt, err := time.Parse(RFC3339FullDate, strdate)
+	if err != nil {
+		return err
+	}
+	*d = Date(tt)
+	return nil
+}
+
+// MarshalBSON renders the Date as a BSON document
+func (d Date) MarshalBSON() ([]byte, error) {
+	return bson.Marshal(bson.M{"data": d.String()})
+}
+
+// UnmarshalBSON reads the Date from a BSON document
+func (d *Date) UnmarshalBSON(data []byte) error {
+	var m bson.M
+	if err := bson.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if data, ok := m["data"].(string); ok {
+		rd, err := time.Parse(RFC3339FullDate, data)
+		if err != nil {
+			return err
+		}
+		*d = Date(rd)
+		return nil
+	}
+
+	return errors.New("couldn't unmarshal bson bytes value as Date")
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Date) DeepCopyInto(out *Date) {
+	*out = *d
+}
+
+// DeepCopy copies the receiver into a new Date.
+func (d *Date) DeepCopy() *Date {
+	if d == nil {
+		return nil
+	}
+	out := new(Date)
+	d.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go
new file mode 100644
index 000000000000..fcf10a6a2512
--- /dev/null
+++ b/vendor/github.com/go-openapi/strfmt/default.go
@@ -0,0 +1,2032 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"database/sql/driver"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	// A string instance is valid against this attribute if it is a valid
+	// representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	// http://tools.ietf.org/html/rfc1034#section-3.5
+	// <digit> ::= any one of the ten digits 0 through 9
+	// var digit = /[0-9]/;
+	// <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	// var letter = /[a-zA-Z]/;
+	// <let-dig> ::= <letter> | <digit>
+	// var letDig = /[0-9a-zA-Z]/;
+	// <let-dig-hyp> ::= <let-dig> | "-"
+	// var letDigHyp = /[-0-9a-zA-Z]/;
+	// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	// var ldhStr = /[-0-9a-zA-Z]+/;
+	//