diff --git a/Pipfile b/Pipfile index e8eec74c..ca39d591 100644 --- a/Pipfile +++ b/Pipfile @@ -27,7 +27,11 @@ rethinkdb = "==2.3" pickle-mixin = "==1.0.2" pytest-lazy-fixture = "~=0.6.3" +# run `pipenv install --dev` to get the packages below in your env [dev-packages] +aiohttp = "~=3.7" +matplotlib = "~=3.3" +pandas = "~=1.1" [requires] python_version = "3.8.5" diff --git a/Pipfile.lock b/Pipfile.lock index d4132433..420a4888 100644 --- a/Pipfile.lock +++ b/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "3072a96e6dca3f7c7221c3b832fec937c7efba6f6cd2bde47463d6f81d7096d9" + "sha256": "f541f6f453dac9239755418c9a84b39a645266cd74f7e7f80561008c770c60a8" }, "pipfile-spec": 6, "requires": { @@ -21,7 +21,6 @@ "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197", "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.4.0" }, "attrs": { @@ -29,7 +28,6 @@ "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==20.3.0" }, "certifi": { @@ -42,17 +40,16 @@ }, "chardet": { "hashes": [ - "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", - "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691" + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" ], - "version": "==3.0.4" + "version": "==4.0.0" }, "click": { "hashes": [ "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a", "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==7.1.2" }, "clickclick": { @@ -71,48 +68,61 @@ "sha256:5439e9659a89c4380d93a07acfbf3380d70be4130574de8881e5f0dfec7ad0e2" ], "index": "pypi", - "markers": "python_version >= '3.6'", "version": "==2.7.0" }, "coverage": { "hashes": [ - "sha256:0203acd33d2298e19b57451ebb0bed0ab0c602e5cf5a818591b4918b1f97d516", - "sha256:0f313707cdecd5cd3e217fc68c78a960b616604b559e9ea60cc16795c4304259", - "sha256:1c6703094c81fa55b816f5ae542c6ffc625fec769f22b053adb42ad712d086c9", - "sha256:1d44bb3a652fed01f1f2c10d5477956116e9b391320c94d36c6bf13b088a1097", - "sha256:280baa8ec489c4f542f8940f9c4c2181f0306a8ee1a54eceba071a449fb870a0", - "sha256:29a6272fec10623fcbe158fdf9abc7a5fa032048ac1d8631f14b50fbfc10d17f", - "sha256:2b31f46bf7b31e6aa690d4c7a3d51bb262438c6dcb0d528adde446531d0d3bb7", - "sha256:2d43af2be93ffbad25dd959899b5b809618a496926146ce98ee0b23683f8c51c", - "sha256:381ead10b9b9af5f64646cd27107fb27b614ee7040bb1226f9c07ba96625cbb5", - "sha256:47a11bdbd8ada9b7ee628596f9d97fbd3851bd9999d398e9436bd67376dbece7", - "sha256:4d6a42744139a7fa5b46a264874a781e8694bb32f1d76d8137b68138686f1729", - "sha256:50691e744714856f03a86df3e2bff847c2acede4c191f9a1da38f088df342978", - "sha256:530cc8aaf11cc2ac7430f3614b04645662ef20c348dce4167c22d99bec3480e9", - "sha256:582ddfbe712025448206a5bc45855d16c2e491c2dd102ee9a2841418ac1c629f", - "sha256:63808c30b41f3bbf65e29f7280bf793c79f54fb807057de7e5238ffc7cc4d7b9", - "sha256:71b69bd716698fa62cd97137d6f2fdf49f534decb23a2c6fc80813e8b7be6822", - "sha256:7858847f2d84bf6e64c7f66498e851c54de8ea06a6f96a32a1d192d846734418", - 
"sha256:78e93cc3571fd928a39c0b26767c986188a4118edc67bc0695bc7a284da22e82", - "sha256:7f43286f13d91a34fadf61ae252a51a130223c52bfefb50310d5b2deb062cf0f", - "sha256:86e9f8cd4b0cdd57b4ae71a9c186717daa4c5a99f3238a8723f416256e0b064d", - "sha256:8f264ba2701b8c9f815b272ad568d555ef98dfe1576802ab3149c3629a9f2221", - "sha256:9342dd70a1e151684727c9c91ea003b2fb33523bf19385d4554f7897ca0141d4", - "sha256:9361de40701666b034c59ad9e317bae95c973b9ff92513dd0eced11c6adf2e21", - "sha256:9669179786254a2e7e57f0ecf224e978471491d660aaca833f845b72a2df3709", - "sha256:aac1ba0a253e17889550ddb1b60a2063f7474155465577caa2a3b131224cfd54", - "sha256:aef72eae10b5e3116bac6957de1df4d75909fc76d1499a53fb6387434b6bcd8d", - "sha256:bd3166bb3b111e76a4f8e2980fa1addf2920a4ca9b2b8ca36a3bc3dedc618270", - "sha256:c1b78fb9700fc961f53386ad2fd86d87091e06ede5d118b8a50dea285a071c24", - "sha256:c3888a051226e676e383de03bf49eb633cd39fc829516e5334e69b8d81aae751", - "sha256:c5f17ad25d2c1286436761b462e22b5020d83316f8e8fcb5deb2b3151f8f1d3a", - "sha256:c851b35fc078389bc16b915a0a7c1d5923e12e2c5aeec58c52f4aa8085ac8237", - "sha256:cb7df71de0af56000115eafd000b867d1261f786b5eebd88a0ca6360cccfaca7", - "sha256:cedb2f9e1f990918ea061f28a0f0077a07702e3819602d3507e2ff98c8d20636", - "sha256:e8caf961e1b1a945db76f1b5fa9c91498d15f545ac0ababbe575cfab185d3bd8" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", - "version": "==5.3" + "sha256:08b3ba72bd981531fd557f67beee376d6700fba183b167857038997ba30dd297", + "sha256:2757fa64e11ec12220968f65d086b7a29b6583d16e9a544c889b22ba98555ef1", + "sha256:3102bb2c206700a7d28181dbe04d66b30780cde1d1c02c5f3c165cf3d2489497", + "sha256:3498b27d8236057def41de3585f317abae235dd3a11d33e01736ffedb2ef8606", + "sha256:378ac77af41350a8c6b8801a66021b52da8a05fd77e578b7380e876c0ce4f528", + "sha256:38f16b1317b8dd82df67ed5daa5f5e7c959e46579840d77a67a4ceb9cef0a50b", + "sha256:3911c2ef96e5ddc748a3c8b4702c61986628bb719b8378bf1e4a6184bbd48fe4", + "sha256:3a3c3f8863255f3c31db3889f8055989527173ef6192a283eb6f4db3c579d830", + "sha256:3b14b1da110ea50c8bcbadc3b82c3933974dbeea1832e814aab93ca1163cd4c1", + "sha256:535dc1e6e68fad5355f9984d5637c33badbdc987b0c0d303ee95a6c979c9516f", + "sha256:6f61319e33222591f885c598e3e24f6a4be3533c1d70c19e0dc59e83a71ce27d", + "sha256:723d22d324e7997a651478e9c5a3120a0ecbc9a7e94071f7e1954562a8806cf3", + "sha256:76b2775dda7e78680d688daabcb485dc87cf5e3184a0b3e012e1d40e38527cc8", + "sha256:782a5c7df9f91979a7a21792e09b34a658058896628217ae6362088b123c8500", + "sha256:7e4d159021c2029b958b2363abec4a11db0ce8cd43abb0d9ce44284cb97217e7", + "sha256:8dacc4073c359f40fcf73aede8428c35f84639baad7e1b46fce5ab7a8a7be4bb", + "sha256:8f33d1156241c43755137288dea619105477961cfa7e47f48dbf96bc2c30720b", + "sha256:8ffd4b204d7de77b5dd558cdff986a8274796a1e57813ed005b33fd97e29f059", + "sha256:93a280c9eb736a0dcca19296f3c30c720cb41a71b1f9e617f341f0a8e791a69b", + "sha256:9a4f66259bdd6964d8cf26142733c81fb562252db74ea367d9beb4f815478e72", + "sha256:9a9d4ff06804920388aab69c5ea8a77525cf165356db70131616acd269e19b36", + "sha256:a2070c5affdb3a5e751f24208c5c4f3d5f008fa04d28731416e023c93b275277", + "sha256:a4857f7e2bc6921dbd487c5c88b84f5633de3e7d416c4dc0bb70256775551a6c", + "sha256:a607ae05b6c96057ba86c811d9c43423f35e03874ffb03fbdcd45e0637e8b631", + "sha256:a66ca3bdf21c653e47f726ca57f46ba7fc1f260ad99ba783acc3e58e3ebdb9ff", + "sha256:ab110c48bc3d97b4d19af41865e14531f300b482da21783fdaacd159251890e8", + "sha256:b239711e774c8eb910e9b1ac719f02f5ae4bf35fa0420f438cdc3a7e4e7dd6ec", + 
"sha256:be0416074d7f253865bb67630cf7210cbc14eb05f4099cc0f82430135aaa7a3b", + "sha256:c46643970dff9f5c976c6512fd35768c4a3819f01f61169d8cdac3f9290903b7", + "sha256:c5ec71fd4a43b6d84ddb88c1df94572479d9a26ef3f150cef3dacefecf888105", + "sha256:c6e5174f8ca585755988bc278c8bb5d02d9dc2e971591ef4a1baabdf2d99589b", + "sha256:c89b558f8a9a5a6f2cfc923c304d49f0ce629c3bd85cb442ca258ec20366394c", + "sha256:cc44e3545d908ecf3e5773266c487ad1877be718d9dc65fc7eb6e7d14960985b", + "sha256:cc6f8246e74dd210d7e2b56c76ceaba1cc52b025cd75dbe96eb48791e0250e98", + "sha256:cd556c79ad665faeae28020a0ab3bda6cd47d94bec48e36970719b0b86e4dcf4", + "sha256:ce6f3a147b4b1a8b09aae48517ae91139b1b010c5f36423fa2b866a8b23df879", + "sha256:ceb499d2b3d1d7b7ba23abe8bf26df5f06ba8c71127f188333dddcf356b4b63f", + "sha256:cef06fb382557f66d81d804230c11ab292d94b840b3cb7bf4450778377b592f4", + "sha256:e448f56cfeae7b1b3b5bcd99bb377cde7c4eb1970a525c770720a352bc4c8044", + "sha256:e52d3d95df81c8f6b2a1685aabffadf2d2d9ad97203a40f8d61e51b70f191e4e", + "sha256:ee2f1d1c223c3d2c24e3afbb2dd38be3f03b1a8d6a83ee3d9eb8c36a52bee899", + "sha256:f2c6888eada180814b8583c3e793f3f343a692fc802546eed45f40a001b1169f", + "sha256:f51dbba78d68a44e99d484ca8c8f604f17e957c1ca09c3ebc2c7e3bbd9ba0448", + "sha256:f54de00baf200b4539a5a092a759f000b5f45fd226d6d25a76b0dff71177a714", + "sha256:fa10fee7e32213f5c7b0d6428ea92e3a3fdd6d725590238a3f92c0de1c78b9d2", + "sha256:fabeeb121735d47d8eab8671b6b031ce08514c86b7ad8f7d5490a7b6dcd6267d", + "sha256:fac3c432851038b3e6afe086f777732bcf7f6ebbfd90951fa04ee53db6d0bcdd", + "sha256:fda29412a66099af6d6de0baa6bd7c52674de177ec2ad2630ca264142d69c6c7", + "sha256:ff1330e8bc996570221b450e2d539134baa9465f5cb98aff0e0f73f34172e0ae" + ], + "version": "==5.3.1" }, "coveralls": { "hashes": [ @@ -155,7 +165,6 @@ "hashes": [ "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d" ], - "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.18.2" }, "geocoder": { @@ -176,7 +185,6 @@ }, "geomet": { "hashes": [ - "sha256:87ae0fc42e532b9e98969c0bbf895a5e0b2bb4f6f775cf51a74e6482f1f35c2b", "sha256:91d754f7c298cbfcabd3befdb69c641c27fe75e808b27aa55028605761d17e95", "sha256:a41a1e336b381416d6cbed7f1745c848e91defaa4d4c1bdc1312732e46ffad2b" ], @@ -196,7 +204,6 @@ "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.10" }, "inflection": { @@ -204,7 +211,6 @@ "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417", "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2" ], - "markers": "python_version >= '3.5'", "version": "==0.5.1" }, "influxdb": { @@ -220,7 +226,6 @@ "sha256:321b033d07f2a4136d3ec762eac9f16a10ccd60f53c0c91af90217ace7ba1f19", "sha256:b12271b2047cb23eeb98c8b5622e2e5c5e9abd9784a153e9d8ef9cb4dd09d749" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.0" }, "jinja2": { @@ -228,7 +233,6 @@ "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0", "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==2.11.2" }, "jsonschema": { @@ -274,7 +278,6 @@ "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7", 
"sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.1.1" }, "more-itertools": { @@ -282,7 +285,6 @@ "sha256:8e1a2a43b2f2727425f2b5839587ae37093f19153dc26c0927d1048ff6557330", "sha256:b3a9005928e5bed54076e6e549c792b306fddfe72b2d1d22dd63d42d5d3899cf" ], - "markers": "python_version >= '3.5'", "version": "==8.6.0" }, "openapi-spec-validator": { @@ -313,92 +315,88 @@ "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0", "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.1" }, "py": { "hashes": [ - "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2", - "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342" + "sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3", + "sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.9.0" + "version": "==1.10.0" }, "pymongo": { "hashes": [ - "sha256:00f6c32f86a5bd1cbefcc0a27ea06565628de3bb2e6786d3f0dce0330e70c958", - "sha256:016e8162b57e2a45cb8d2356f39795ccff2ee65fd79fe078de4f9aa78ef1994b", - "sha256:078e74cffb4955a454dd0955c3fa38327185a41447ac4e368f81c2f0c07e6559", - "sha256:079a30c21d3c334ee65581a8cac5380e94521970423996c5b18a7c550230d94c", - "sha256:0b5aa85a04efcf22c176de25e3fce675510d7700f523728fa9485d576db41358", - "sha256:0dd8c0367639cd5cf84be91af6b733076119745aea6e53fdf9e581819d911eac", - "sha256:1529b23a51ef8613712b3e19225690564955250932d58487af6c060413ce0f1f", - "sha256:17a3b2148b9e0914dc5f0d6a5c636f81dc4b428b80ea45576f79cfe619844c6d", - "sha256:1f5fabe75c9b7eb5a42dac9717f952a879ab3705bcf7e9ef744cdbdfd37bcf3d", - "sha256:220216da1d4fb10f941ff5e408f2958896fe534283bb3b1c1c17d4b0ac5d8b45", - "sha256:251acfa87e07e47044ed7f6157fc63a95020bb4ee9d705fb2faf3b67e6a3574e", - "sha256:2542b21b08dc30bf0a69de55a42225f25b0af0dea7852edf2011952abb50e7b4", - "sha256:2bcbde25342fa0991b0c144c8eafadc77e605e7940bf462922f6d550f88d6777", - "sha256:2c0b0d201182bfbbfb2c679af3118ca53926a31d8c0c21e9b7943f8264ec0052", - "sha256:319f1b5a8373e280026905093aaacf4cca858a9ae653e456b9f9f9ad5f308088", - "sha256:342248b25c193ab20e1145989455d614d4d553334576a72be600ee371fa5de41", - "sha256:3b8076ff801dca0920f1b5c76a0e062dc26bb76de7e79efcf347c3a5ff776127", - "sha256:3cf7726216a8792d147ba44433fddc19ed149531fb23899ce82d24a0a90ec2fd", - "sha256:4266ebb14bed0206c73e2f626949679c23a6f935ea4b60529d7c3551f2f3051a", - "sha256:4cc8cf971ee53ad65e53b80a6f611070dbe55640b447ae9b2b98329aebd21155", - "sha256:4da19b5c555cf1d8b8a0b980d9c97b1b0f27e05bcf278bf64cc6c30b697a59f9", - "sha256:4f0023db6168f052ffeec435ca0608ffe8abac17a36b8084bdc348c978e08a23", - "sha256:5012342f457a544818254a89f7e7a4ecd05c4eaa89ed68ae58727039c92ff0f7", - "sha256:56bbbccf22fd91ae88b2dffe56dceb84d80fa808065e6bcbedb86ec7e0f84b3b", - "sha256:5a07a9983e0bc51ad61a16c228d1b60d1f98f7df5f225f435b41f8921d335e06", - "sha256:5a88fad3dcfaa222383ceb53af9a030a841ad998636648a748d79515c8afb6b4", - "sha256:5fc04e445e58103bfddb601822ab355ffb8fb760142593e2b0f20c3940859352", - "sha256:613d30623bd1f9418d5a6999f73066b01831bf7c7789bc3fe2bf44b5fe5dc67d", - "sha256:6e7b2b589a600f467e9159df907638c2d08aca46b0f74d88ceeaa185abd9069b", - 
"sha256:74b6e8e240e53518a701f4cd56a518c8de2631d6019e93295499db15cf46d973", - "sha256:78d4142116d6d910f1c195f3253b6209f21bb1b06fb6c6ebf90cbf80518c4071", - "sha256:82bc20e554fe9175e6ae53db74c3f094c4d752a55e3b363b93ff93d87a868cb7", - "sha256:8becbccb58b7b37ce2aa5f387f298914ec520747f11edfbc31347bd2ba7e3481", - "sha256:903396921ad52c63c423683a141391e28edb8b6dfbd2388a6848b052a9e23863", - "sha256:9a5667eb4bc9ed805d1a06ca8b5ff7ee25df666b05cbb8f58f9ac16cac243d0b", - "sha256:9f672a8b5972a2db3284f8c0324dcaaeceedead9b4e5c836e93092b599f2dbf0", - "sha256:a08c60335a4b1c6348d5be176f379f7b69f2021d1ebaafb11586007f6268a423", - "sha256:a122c9e30e49bbd8c5c423e3ea2bcff5deb8a2c504d88cbfc3c21b036295bbf3", - "sha256:a6bf56c1f6d13b0e22db604c73fba6feca088fe7f6aa978cc215f83e2005d765", - "sha256:a9c1a2538cd120283e7137ac97ce27ebdfcb675730c5055d6332b0043f4e5a55", - "sha256:aacc9b646e142e7d32b88f0dccc6ab28f669ecf3ccc7212a274b99c83e228ef1", - "sha256:ae0403befca6a9088be702c1d94fc1b17c333cd84a43328c973d223991c41936", - "sha256:b3b939780963164086fc256436c1bf9301d4c5c99026e2c281b21237234aaa2c", - "sha256:b6282855f9193f4e7ae07c2d138b583d487d0e66add62eda7013365815188ce9", - "sha256:bb3f19af1949cbf93b17021c8c61e14e697c3f5c8923134b085dcef9d271b699", - "sha256:be124527bfc30869e8a17915e1e960150757553d58c98e56c598fbb85697e32e", - "sha256:c66de369d459f081a1860c58c6218da5e30a4c5d07277526f66f6c0b0efe742f", - "sha256:c756d00b728f0d5ec65519d9005389fea519b2ad3aef0896c49ce80e6da8b547", - "sha256:c7953352bc27ac5fbf79d43ef7bf576ad06fa06c0ae0d6ad7c36b14cd596d565", - "sha256:cb81aa36a171ed1c28d615577ab5feae8e0b8e48818833663b446dd2bb8b53cd", - "sha256:cc375f5cea8635597c21ff6bc61486ebe5dca5e662982c9e2b58a9106f92b56e", - "sha256:d349cfc776f8859c2f99ff916e307555f5615ffabfbd6162f3822f21aa1e22ed", - "sha256:dbc23ece1b111a10eb6d2475a7726b70418303d2e05078d223e7f97b286745a7", - "sha256:e3158d2824391c52020d67e629d2586af774b543a75dc9f64eb830991ac2776e", - "sha256:e5b4ba4939c2ab6550ecef1ccd7b00537de7a7e18a8f03bce0fc4786111b4d47", - "sha256:e6d72517fa7370841981770c3802e7a8ca7e94ead1fba9981349fbe8e539f7eb", - "sha256:e6e6089393646c1ef865484c27871d52ead69641dce5b53cbae2096cec615151", - "sha256:e83d61f9a247344c701147934f203117c3064c982d35396565a6ca8356bc0ea9", - "sha256:e9d7af8f668d2880fff8539188694e75e0b91d37174672a59dc5c5a0fea9f60d", - "sha256:ebf21c07353d313421212e8ac0b21b6161c81aa71a12471b58629e38c784a751", - "sha256:eebe522043450e8cf83ab7be2fc0268dfe702de586970d752cb012d6ce72309f", - "sha256:f1ee136a8648cd76b44afdff99096823e68be90d02188d30e2ccd00b58e9b353", - "sha256:f648d58915eb339329741296599fb25bc2a76474e64bdfeae4423ae83b312fb8", - "sha256:fd09c310c54c6861e1488fcd30ceffa5dcd6c2dfe9f8409f47a8266fdc698547" + "sha256:019ddf7ced8e42cc6c8c608927c799be8097237596c94ffe551f6ef70e55237e", + "sha256:047c325c4a96e7be7d11acf58639bcf71a81ca212d9c6590e3369bc28678647a", + "sha256:047cc2007b280672ddfdf2e7b862aad8d898f481f65bbc9067bfa4e420a019a9", + "sha256:061d59f525831c4051af0b6dbafa62b0b8b168d4ef5b6e3c46d0811b8499d100", + "sha256:082832a59da18efab4d9148cca396451bac99da9757f31767f706e828b5b8500", + "sha256:0a53a751d977ad02f1bd22ddb6288bb4816c4758f44a50225462aeeae9cbf6a0", + "sha256:1222025db539641071a1b67f6950f65a6342a39db5b454bf306abd6954f1ad8a", + "sha256:1580fad512c678b720784e5c9018621b1b3bd37fb5b1633e874738862d6435c7", + "sha256:202ea1d4edc8a5439fc179802d807b49e7e563207fea5610779e56674ac770c6", + "sha256:21d7b48567a1c80f9266e0ab61c1218a31279d911da345679188733e354f81cc", + "sha256:264843ce2af0640994a4331148ef5312989bc004678c457460758766c9b4decc", + 
"sha256:270a1f6a331eac3a393090af06df68297cb31a8b2df0bdcbd97dc613c5758e78", + "sha256:29a6840c2ac778010547cad5870f3db2e080ad7fad01197b07fff993c08692c8", + "sha256:3646c2286d889618d43e01d9810ac1fc17709d2b4dec61366df5edc8ba228b3e", + "sha256:36b9b98a39565a8f33803c81569442b35e749a72fb1aa7d0bcdb1a33052f8bcc", + "sha256:3ec8f8e106a1476659d8c020228b45614daabdbdb6c6454a843a1d4f77d13339", + "sha256:422069f2cebf58c9dd9e8040b4768f7be4f228c95bc4505e8fa8e7b4f7191ad8", + "sha256:44376a657717de8847d5d71a9305f3595c7e78c91ac77edbb87058d12ede87a6", + "sha256:45728e6aae3023afb5b2829586d1d2bfd9f0d71cfd7d3c924b71a5e9aef617a8", + "sha256:46792b71ab802d9caf1fc9d52e83399ef8e1a36e91eef4d827c06e36b8df2230", + "sha256:4942a5659ae927bb764a123a6409870ca5dd572d83b3bfb71412c9a191bbf792", + "sha256:4be4fe9d18523da98deeb0b554ac76e1dc1562ee879d62572b34dda8593efcc1", + "sha256:523804bd8fcb5255508052b50073a27c701b90a73ea46e29be46dad5fe01bde6", + "sha256:540dafd6f4a0590fc966465c726b80fa7c0804490c39786ef29236fe68c94401", + "sha256:5980509801cbd2942df31714d055d89863684b4de26829c349362e610a48694e", + "sha256:5ad7b96c27acd7e256b33f47cf3d23bd7dd902f9c033ae43f32ffcbc37bebafd", + "sha256:6122470dfa61d4909b75c98012c1577404ba4ab860d0095e0c6980560cb3711f", + "sha256:6175fd105da74a09adb38f93be96e1f64873294c906e5e722cbbc5bd10c44e3b", + "sha256:646d4d30c5aa7c0ddbfe9b990f0f77a88621024a21ad0b792bd9d58caa9611f0", + "sha256:6700e251c6396cc05d7460dc05ef8e19e60a7b53b62c007725b48e123aaa2b1c", + "sha256:6aac7e0e8de92f11a410eb68c24a2decbac6f094e82fd95d22546d0168e7a18b", + "sha256:6e7a6057481a644970e43475292e1c0af095ca39a20fe83781196bd6e6690a38", + "sha256:76579fcf77052b39796fe4a11818d1289dd48cffe15951b3403288fa163c29f6", + "sha256:7e69fa025a1db189443428f345fea5555d16413df6addc056e17bb8c9794b006", + "sha256:7f0c507e1f108790840d6c4b594019ebf595025c324c9f7e9c9b2b15b41f884e", + "sha256:813db97e9955b6b1b50b5cebd18cb148580603bb9b067ea4c5cc656b333bc906", + "sha256:82d5ded5834b6c92380847860eb28dcaf20b847a27cee5811c4aaceef87fd280", + "sha256:82f6e42ba40440a7e0a20bfe12465a3b62d65966a4c7ad1a21b36ffff88de6fe", + "sha256:8d669c720891781e7c82d412cad39f9730ef277e3957b48a3344dae47d3caa03", + "sha256:944ed467feb949e103555863fa934fb84216a096b0004ca364d3ddf9d18e2b9e", + "sha256:96c6aef7ffb0d37206c0342abb82d874fa8cdc344267277ec63f562b94335c22", + "sha256:9be785bd4e1ba0148fb00ca84e4dbfbd1c74df3af3a648559adc60b0782f34de", + "sha256:9d19843568df9d263dc92ae4cc2279879add8a26996473f9155590cac635b321", + "sha256:a118a1df7280ffab7fe0f3eab325868339ff1c4d5b8e0750db0f0a796da8f849", + "sha256:b4294ddf76452459433ecfa6a93258608b5e462c76ef15e4695ed5e2762f009f", + "sha256:b50af6701b4a5288b77fb4db44a363aa9485caf2c3e7a40c0373fd45e34440af", + "sha256:b875bb4b438931dce550e170bfb558597189b8d0160f4ac60f14a21955161699", + "sha256:b95d2c2829b5956bf54d9a22ffec911dea75abf0f0f7e0a8a57423434bfbde91", + "sha256:c046e09e886f4539f8626afba17fa8f2e6552731f9384e2827154e3e3b7fda4e", + "sha256:c2b67881392a9e85aa108e75f62cdbe372d5a3f17ea5f8d3436dcf4662052f14", + "sha256:c6cf288c9e03195d8e12b72a6388b32f18a5e9c2545622417a963e428e1fe496", + "sha256:c812b6e53344e92f10f12235219fb769c491a4a87a02c9c3f93fe632e493bda8", + "sha256:cc421babc687dc52ce0fc19787b2404518ca749d9db59576100946ff886f38ed", + "sha256:ce53c00be204ec4428d3c1f3c478ae89d388efec575544c27f57b61e9fa4a7f2", + "sha256:ce9964c117cbe5cf6269f30a2b334d28675956e988b7dbd0b4f7370924afda2e", + "sha256:d6f82e86896a8db70e8ae8fa4b7556a0f188f1d8a6c53b2ba229889d55a59308", + "sha256:d9d3ae537f61011191b2fd6f8527b9f9f8a848b37d4c85a0f7bb28004c42b546", + 
"sha256:e565d1e4388765c135052717f15f9e0314f9d172062444c6b3fc0002e93ed04b", + "sha256:ed98683d8f01f1c46ef2d02469e04e9a8fe9a73a9741a4e6e66677a73b59bec8", + "sha256:ef18aa15b1aa18c42933deed5233b3284186e9ed85c25d2704ceff5099a3964c", + "sha256:fa741e9c805567239f845c7e9a016aff797f9bb02ff9bc8ccd2fbd9eafefedd4", + "sha256:fc4946acb6cdada08f60aca103b61334995523da65be5fe816ea8571c9967d46", + "sha256:fcc66d17a3363b7bd6d2655de8706e25a3cd1be2bd1b8e8d8a5c504a6ef893ae" ], "index": "pypi", - "version": "==3.11.1" + "version": "==3.11.2" }, "pyrsistent": { "hashes": [ "sha256:2e636185d9eb976a18a8a8e96efce62f2905fea90041958d8cc2a189756ebf3e" ], - "markers": "python_version >= '3.5'", "version": "==0.17.3" }, "pytest": { @@ -484,11 +482,11 @@ }, "requests": { "hashes": [ - "sha256:7f1a0b932f4a60a1a65caa4263921bb7d9ee911957e0ae4a23a6dd08185ad5f8", - "sha256:e786fa28d8c9154e6a4de5d46a1d921b8749f8b74e28bde23768e5e16eece998" + "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804", + "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e" ], "index": "pypi", - "version": "==2.25.0" + "version": "==2.25.1" }, "rethinkdb": { "hashes": [ @@ -502,7 +500,6 @@ "sha256:74815c25aad1fe0b5fb994e96c3de63e8695164358a80138352aaadfa4760350", "sha256:d6865ed1d135ddb124a619d7cd3a5b505f69a7c92e248024dd7e48bc77752af5" ], - "markers": "python_version >= '3.5'", "version": "==1.2.0" }, "six": { @@ -510,7 +507,6 @@ "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==1.15.0" }, "swagger-ui-bundle": { @@ -525,7 +521,6 @@ "sha256:19188f96923873c92ccb987120ec4acaa12f0461fa9ce5d3d0772bc965a39e08", "sha256:d8ff90d979214d7b4f8ce956e80f4028fc6860e4431f731ea4a8c08f23f99473" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", "version": "==1.26.2" }, "werkzeug": { @@ -533,9 +528,378 @@ "sha256:2de2a5db0baeae7b2d2664949077c2ac63fbd16d98da0ff71837f7d1dea3fd43", "sha256:6c80b1e5ad3665290ea39320b91e1be1e0d5f60652b964a3070216de83d2e47c" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", "version": "==1.0.1" } }, - "develop": {} + "develop": { + "aiohttp": { + "hashes": [ + "sha256:0b795072bb1bf87b8620120a6373a3c61bfcb8da7e5c2377f4bb23ff4f0b62c9", + "sha256:0d438c8ca703b1b714e82ed5b7a4412c82577040dadff479c08405e2a715564f", + "sha256:16a3cb5df5c56f696234ea9e65e227d1ebe9c18aa774d36ff42f532139066a5f", + "sha256:1edfd82a98c5161497bbb111b2b70c0813102ad7e0aa81cbeb34e64c93863005", + "sha256:2406dc1dda01c7f6060ab586e4601f18affb7a6b965c50a8c90ff07569cf782a", + "sha256:2858b2504c8697beb9357be01dc47ef86438cc1cb36ecb6991796d19475faa3e", + "sha256:2a7b7640167ab536c3cb90cfc3977c7094f1c5890d7eeede8b273c175c3910fd", + "sha256:3228b7a51e3ed533f5472f54f70fd0b0a64c48dc1649a0f0e809bec312934d7a", + "sha256:328b552513d4f95b0a2eea4c8573e112866107227661834652a8984766aa7656", + "sha256:39f4b0a6ae22a1c567cb0630c30dd082481f95c13ca528dc501a7766b9c718c0", + "sha256:3b0036c978cbcc4a4512278e98e3e6d9e6b834dc973206162eddf98b586ef1c6", + "sha256:3ea8c252d8df5e9166bcf3d9edced2af132f4ead8ac422eac723c5781063709a", + "sha256:41608c0acbe0899c852281978492f9ce2c6fbfaf60aff0cefc54a7c4516b822c", + "sha256:59d11674964b74a81b149d4ceaff2b674b3b0e4d0f10f0be1533e49c4a28408b", + "sha256:5e479df4b2d0f8f02133b7e4430098699450e1b2a826438af6bec9a400530957", 
+ "sha256:684850fb1e3e55c9220aad007f8386d8e3e477c4ec9211ae54d968ecdca8c6f9", + "sha256:6ccc43d68b81c424e46192a778f97da94ee0630337c9bbe5b2ecc9b0c1c59001", + "sha256:6d42debaf55450643146fabe4b6817bb2a55b23698b0434107e892a43117285e", + "sha256:710376bf67d8ff4500a31d0c207b8941ff4fba5de6890a701d71680474fe2a60", + "sha256:756ae7efddd68d4ea7d89c636b703e14a0c686688d42f588b90778a3c2fc0564", + "sha256:77149002d9386fae303a4a162e6bce75cc2161347ad2ba06c2f0182561875d45", + "sha256:78e2f18a82b88cbc37d22365cf8d2b879a492faedb3f2975adb4ed8dfe994d3a", + "sha256:7d9b42127a6c0bdcc25c3dcf252bb3ddc70454fac593b1b6933ae091396deb13", + "sha256:8389d6044ee4e2037dca83e3f6994738550f6ee8cfb746762283fad9b932868f", + "sha256:9c1a81af067e72261c9cbe33ea792893e83bc6aa987bfbd6fdc1e5e7b22777c4", + "sha256:c1e0920909d916d3375c7a1fdb0b1c78e46170e8bb42792312b6eb6676b2f87f", + "sha256:c68fdf21c6f3573ae19c7ee65f9ff185649a060c9a06535e9c3a0ee0bbac9235", + "sha256:c733ef3bdcfe52a1a75564389bad4064352274036e7e234730526d155f04d914", + "sha256:c9c58b0b84055d8bc27b7df5a9d141df4ee6ff59821f922dd73155861282f6a3", + "sha256:d03abec50df423b026a5aa09656bd9d37f1e6a49271f123f31f9b8aed5dc3ea3", + "sha256:d2cfac21e31e841d60dc28c0ec7d4ec47a35c608cb8906435d47ef83ffb22150", + "sha256:dcc119db14757b0c7bce64042158307b9b1c76471e655751a61b57f5a0e4d78e", + "sha256:df3a7b258cc230a65245167a202dd07320a5af05f3d41da1488ba0fa05bc9347", + "sha256:df48a623c58180874d7407b4d9ec06a19b84ed47f60a3884345b1a5099c1818b", + "sha256:e1b95972a0ae3f248a899cdbac92ba2e01d731225f566569311043ce2226f5e7", + "sha256:f326b3c1bbfda5b9308252ee0dcb30b612ee92b0e105d4abec70335fab5b1245", + "sha256:f411cb22115cb15452d099fec0ee636b06cf81bfb40ed9c02d30c8dc2bc2e3d1" + ], + "index": "pypi", + "version": "==3.7.3" + }, + "async-timeout": { + "hashes": [ + "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f", + "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3" + ], + "version": "==3.0.1" + }, + "attrs": { + "hashes": [ + "sha256:31b2eced602aa8423c2aea9c76a724617ed67cf9513173fd3a4f03e3a929c7e6", + "sha256:832aa3cde19744e49938b91fea06d69ecb9e649c93ba974535d08ad92164f700" + ], + "version": "==20.3.0" + }, + "chardet": { + "hashes": [ + "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa", + "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5" + ], + "version": "==4.0.0" + }, + "cycler": { + "hashes": [ + "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", + "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8" + ], + "version": "==0.10.0" + }, + "idna": { + "hashes": [ + "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6", + "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0" + ], + "version": "==2.10" + }, + "kiwisolver": { + "hashes": [ + "sha256:0cd53f403202159b44528498de18f9285b04482bab2a6fc3f5dd8dbb9352e30d", + "sha256:1e1bc12fb773a7b2ffdeb8380609f4f8064777877b2225dec3da711b421fda31", + "sha256:225e2e18f271e0ed8157d7f4518ffbf99b9450fca398d561eb5c4a87d0986dd9", + "sha256:232c9e11fd7ac3a470d65cd67e4359eee155ec57e822e5220322d7b2ac84fbf0", + "sha256:31dfd2ac56edc0ff9ac295193eeaea1c0c923c0355bf948fbd99ed6018010b72", + "sha256:33449715e0101e4d34f64990352bce4095c8bf13bed1b390773fc0a7295967b3", + "sha256:401a2e9afa8588589775fe34fc22d918ae839aaaf0c0e96441c0fdbce6d8ebe6", + "sha256:44a62e24d9b01ba94ae7a4a6c3fb215dc4af1dde817e7498d901e229aaf50e4e", + "sha256:50af681a36b2a1dee1d3c169ade9fdc59207d3c31e522519181e12f1b3ba7000", + 
"sha256:563c649cfdef27d081c84e72a03b48ea9408c16657500c312575ae9d9f7bc1c3", + "sha256:5989db3b3b34b76c09253deeaf7fbc2707616f130e166996606c284395da3f18", + "sha256:5a7a7dbff17e66fac9142ae2ecafb719393aaee6a3768c9de2fd425c63b53e21", + "sha256:5c3e6455341008a054cccee8c5d24481bcfe1acdbc9add30aa95798e95c65621", + "sha256:5f6ccd3dd0b9739edcf407514016108e2280769c73a85b9e59aa390046dbf08b", + "sha256:72c99e39d005b793fb7d3d4e660aed6b6281b502e8c1eaf8ee8346023c8e03bc", + "sha256:78751b33595f7f9511952e7e60ce858c6d64db2e062afb325985ddbd34b5c131", + "sha256:834ee27348c4aefc20b479335fd422a2c69db55f7d9ab61721ac8cd83eb78882", + "sha256:8be8d84b7d4f2ba4ffff3665bcd0211318aa632395a1a41553250484a871d454", + "sha256:950a199911a8d94683a6b10321f9345d5a3a8433ec58b217ace979e18f16e248", + "sha256:a357fd4f15ee49b4a98b44ec23a34a95f1e00292a139d6015c11f55774ef10de", + "sha256:a53d27d0c2a0ebd07e395e56a1fbdf75ffedc4a05943daf472af163413ce9598", + "sha256:acef3d59d47dd85ecf909c359d0fd2c81ed33bdff70216d3956b463e12c38a54", + "sha256:b38694dcdac990a743aa654037ff1188c7a9801ac3ccc548d3341014bc5ca278", + "sha256:b9edd0110a77fc321ab090aaa1cfcaba1d8499850a12848b81be2222eab648f6", + "sha256:c08e95114951dc2090c4a630c2385bef681cacf12636fb0241accdc6b303fd81", + "sha256:c5518d51a0735b1e6cee1fdce66359f8d2b59c3ca85dc2b0813a8aa86818a030", + "sha256:c8fd0f1ae9d92b42854b2979024d7597685ce4ada367172ed7c09edf2cef9cb8", + "sha256:ca3820eb7f7faf7f0aa88de0e54681bddcb46e485beb844fcecbcd1c8bd01689", + "sha256:cf8b574c7b9aa060c62116d4181f3a1a4e821b2ec5cbfe3775809474113748d4", + "sha256:d3155d828dec1d43283bd24d3d3e0d9c7c350cdfcc0bd06c0ad1209c1bbc36d0", + "sha256:f8d6f8db88049a699817fd9178782867bf22283e3813064302ac59f61d95be05", + "sha256:fd34fbbfbc40628200730bc1febe30631347103fc8d3d4fa012c21ab9c11eca9" + ], + "version": "==1.3.1" + }, + "matplotlib": { + "hashes": [ + "sha256:09225edca87a79815822eb7d3be63a83ebd4d9d98d5aa3a15a94f4eee2435954", + "sha256:0caa687fce6174fef9b27d45f8cc57cbc572e04e98c81db8e628b12b563d59a2", + "sha256:27c9393fada62bd0ad7c730562a0fecbd3d5aaa8d9ed80ba7d3ebb8abc4f0453", + "sha256:2c2c5041608cb75c39cbd0ed05256f8a563e144234a524c59d091abbfa7a868f", + "sha256:2d31aff0c8184b05006ad756b9a4dc2a0805e94d28f3abc3187e881b6673b302", + "sha256:3a4c3e9be63adf8e9b305aa58fb3ec40ecc61fd0f8fd3328ce55bc30e7a2aeb0", + "sha256:5111d6d47a0f5b8f3e10af7a79d5e7eb7e73a22825391834734274c4f312a8a0", + "sha256:5ed3d3342698c2b1f3651f8ea6c099b0f196d16ee00e33dc3a6fee8cb01d530a", + "sha256:6ffd2d80d76df2e5f9f0c0140b5af97e3b87dd29852dcdb103ec177d853ec06b", + "sha256:746897fbd72bd462b888c74ed35d812ca76006b04f717cd44698cdfc99aca70d", + "sha256:756ee498b9ba35460e4cbbd73f09018e906daa8537fff61da5b5bf8d5e9de5c7", + "sha256:7ad44f2c74c50567c694ee91c6fa16d67e7c8af6f22c656b80469ad927688457", + "sha256:83e6c895d93fdf93eeff1a21ee96778ba65ef258e5d284160f7c628fee40c38f", + "sha256:9b03722c89a43a61d4d148acfc89ec5bb54cd0fd1539df25b10eb9c5fa6c393a", + "sha256:a4fe54eab2c7129add75154823e6543b10261f9b65b2abe692d68743a4999f8c", + "sha256:b1b60c6476c4cfe9e5cf8ab0d3127476fd3d5f05de0f343a452badaad0e4bdec", + "sha256:b26c472847911f5a7eb49e1c888c31c77c4ddf8023c1545e0e8e0367ba74fb15", + "sha256:b2a5e1f637a92bb6f3526cc54cc8af0401112e81ce5cba6368a1b7908f9e18bc", + "sha256:b7b09c61a91b742cb5460b72efd1fe26ef83c1c704f666e0af0df156b046aada", + "sha256:b8ba2a1dbb4660cb469fe8e1febb5119506059e675180c51396e1723ff9b79d9", + "sha256:c092fc4673260b1446b8578015321081d5db73b94533fe4bf9b69f44e948d174", + "sha256:c586ac1d64432f92857c3cf4478cfb0ece1ae18b740593f8a39f2f0b27c7fda5", + 
"sha256:d082f77b4ed876ae94a9373f0db96bf8768a7cca6c58fc3038f94e30ffde1880", + "sha256:e71cdd402047e657c1662073e9361106c6981e9621ab8c249388dfc3ec1de07b", + "sha256:eb6b6700ea454bb88333d98601e74928e06f9669c1ea231b4c4c666c1d7701b4" + ], + "index": "pypi", + "version": "==3.3.3" + }, + "multidict": { + "hashes": [ + "sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a", + "sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93", + "sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632", + "sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656", + "sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79", + "sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7", + "sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d", + "sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5", + "sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224", + "sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26", + "sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea", + "sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348", + "sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6", + "sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76", + "sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1", + "sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f", + "sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952", + "sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a", + "sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37", + "sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9", + "sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359", + "sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8", + "sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da", + "sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3", + "sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d", + "sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf", + "sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841", + "sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d", + "sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93", + "sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f", + "sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647", + "sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635", + "sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456", + "sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda", + "sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5", + "sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281", + "sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80" + ], + "version": "==5.1.0" + }, + "numpy": { + "hashes": [ + "sha256:08308c38e44cc926bdfce99498b21eec1f848d24c302519e64203a8da99a97db", + "sha256:09c12096d843b90eafd01ea1b3307e78ddd47a55855ad402b157b6c4862197ce", + "sha256:13d166f77d6dc02c0a73c1101dd87fdf01339febec1030bd810dcd53fff3b0f1", + "sha256:141ec3a3300ab89c7f2b0775289954d193cc8edb621ea05f99db9cb181530512", + 
"sha256:16c1b388cc31a9baa06d91a19366fb99ddbe1c7b205293ed072211ee5bac1ed2", + "sha256:18bed2bcb39e3f758296584337966e68d2d5ba6aab7e038688ad53c8f889f757", + "sha256:1aeef46a13e51931c0b1cf8ae1168b4a55ecd282e6688fdb0a948cc5a1d5afb9", + "sha256:27d3f3b9e3406579a8af3a9f262f5339005dd25e0ecf3cf1559ff8a49ed5cbf2", + "sha256:2a2740aa9733d2e5b2dfb33639d98a64c3b0f24765fed86b0fd2aec07f6a0a08", + "sha256:4377e10b874e653fe96985c05feed2225c912e328c8a26541f7fc600fb9c637b", + "sha256:448ebb1b3bf64c0267d6b09a7cba26b5ae61b6d2dbabff7c91b660c7eccf2bdb", + "sha256:50e86c076611212ca62e5a59f518edafe0c0730f7d9195fec718da1a5c2bb1fc", + "sha256:5734bdc0342aba9dfc6f04920988140fb41234db42381cf7ccba64169f9fe7ac", + "sha256:64324f64f90a9e4ef732be0928be853eee378fd6a01be21a0a8469c4f2682c83", + "sha256:6ae6c680f3ebf1cf7ad1d7748868b39d9f900836df774c453c11c5440bc15b36", + "sha256:6d7593a705d662be5bfe24111af14763016765f43cb6923ed86223f965f52387", + "sha256:8cac8790a6b1ddf88640a9267ee67b1aee7a57dfa2d2dd33999d080bc8ee3a0f", + "sha256:8ece138c3a16db8c1ad38f52eb32be6086cc72f403150a79336eb2045723a1ad", + "sha256:9eeb7d1d04b117ac0d38719915ae169aa6b61fca227b0b7d198d43728f0c879c", + "sha256:a09f98011236a419ee3f49cedc9ef27d7a1651df07810ae430a6b06576e0b414", + "sha256:a5d897c14513590a85774180be713f692df6fa8ecf6483e561a6d47309566f37", + "sha256:ad6f2ff5b1989a4899bf89800a671d71b1612e5ff40866d1f4d8bcf48d4e5764", + "sha256:c42c4b73121caf0ed6cd795512c9c09c52a7287b04d105d112068c1736d7c753", + "sha256:cb1017eec5257e9ac6209ac172058c430e834d5d2bc21961dceeb79d111e5909", + "sha256:d6c7bb82883680e168b55b49c70af29b84b84abb161cbac2800e8fcb6f2109b6", + "sha256:e452dc66e08a4ce642a961f134814258a082832c78c90351b75c41ad16f79f63", + "sha256:e5b6ed0f0b42317050c88022349d994fe72bfe35f5908617512cd8c8ef9da2a9", + "sha256:e9b30d4bd69498fc0c3fe9db5f62fffbb06b8eb9321f92cc970f2969be5e3949", + "sha256:ec149b90019852266fec2341ce1db513b843e496d5a8e8cdb5ced1923a92faab", + "sha256:edb01671b3caae1ca00881686003d16c2209e07b7ef8b7639f1867852b948f7c", + "sha256:f0d3929fe88ee1c155129ecd82f981b8856c5d97bcb0d5f23e9b4242e79d1de3", + "sha256:f29454410db6ef8126c83bd3c968d143304633d45dc57b51252afbd79d700893", + "sha256:fe45becb4c2f72a0907c1d0246ea6449fe7a9e2293bb0e11c4e9a32bb0930a15", + "sha256:fedbd128668ead37f33917820b704784aff695e0019309ad446a6d0b065b57e4" + ], + "version": "==1.19.4" + }, + "pandas": { + "hashes": [ + "sha256:0a643bae4283a37732ddfcecab3f62dd082996021b980f580903f4e8e01b3c5b", + "sha256:0de3ddb414d30798cbf56e642d82cac30a80223ad6fe484d66c0ce01a84d6f2f", + "sha256:19a2148a1d02791352e9fa637899a78e371a3516ac6da5c4edc718f60cbae648", + "sha256:21b5a2b033380adbdd36b3116faaf9a4663e375325831dac1b519a44f9e439bb", + "sha256:24c7f8d4aee71bfa6401faeba367dd654f696a77151a8a28bc2013f7ced4af98", + "sha256:26fa92d3ac743a149a31b21d6f4337b0594b6302ea5575b37af9ca9611e8981a", + "sha256:2860a97cbb25444ffc0088b457da0a79dc79f9c601238a3e0644312fcc14bf11", + "sha256:2b1c6cd28a0dfda75c7b5957363333f01d370936e4c6276b7b8e696dd500582a", + "sha256:2c2f7c670ea4e60318e4b7e474d56447cf0c7d83b3c2a5405a0dbb2600b9c48e", + "sha256:3be7a7a0ca71a2640e81d9276f526bca63505850add10206d0da2e8a0a325dae", + "sha256:4c62e94d5d49db116bef1bd5c2486723a292d79409fc9abd51adf9e05329101d", + "sha256:5008374ebb990dad9ed48b0f5d0038124c73748f5384cc8c46904dace27082d9", + "sha256:5447ea7af4005b0daf695a316a423b96374c9c73ffbd4533209c5ddc369e644b", + "sha256:573fba5b05bf2c69271a32e52399c8de599e4a15ab7cec47d3b9c904125ab788", + "sha256:5a780260afc88268a9d3ac3511d8f494fdcf637eece62fb9eb656a63d53eb7ca", + 
"sha256:70865f96bb38fec46f7ebd66d4b5cfd0aa6b842073f298d621385ae3898d28b5", + "sha256:731568be71fba1e13cae212c362f3d2ca8932e83cb1b85e3f1b4dd77d019254a", + "sha256:b61080750d19a0122469ab59b087380721d6b72a4e7d962e4d7e63e0c4504814", + "sha256:bf23a3b54d128b50f4f9d4675b3c1857a688cc6731a32f931837d72effb2698d", + "sha256:c16d59c15d946111d2716856dd5479221c9e4f2f5c7bc2d617f39d870031e086", + "sha256:c61c043aafb69329d0f961b19faa30b1dab709dd34c9388143fc55680059e55a", + "sha256:c94ff2780a1fd89f190390130d6d36173ca59fcfb3fe0ff596f9a56518191ccb", + "sha256:edda9bacc3843dfbeebaf7a701763e68e741b08fccb889c003b0a52f0ee95782", + "sha256:f10fc41ee3c75a474d3bdf68d396f10782d013d7f67db99c0efbfd0acb99701b" + ], + "index": "pypi", + "version": "==1.1.5" + }, + "pillow": { + "hashes": [ + "sha256:006de60d7580d81f4a1a7e9f0173dc90a932e3905cc4d47ea909bc946302311a", + "sha256:0a2e8d03787ec7ad71dc18aec9367c946ef8ef50e1e78c71f743bc3a770f9fae", + "sha256:0eeeae397e5a79dc088d8297a4c2c6f901f8fb30db47795113a4a605d0f1e5ce", + "sha256:11c5c6e9b02c9dac08af04f093eb5a2f84857df70a7d4a6a6ad461aca803fb9e", + "sha256:2fb113757a369a6cdb189f8df3226e995acfed0a8919a72416626af1a0a71140", + "sha256:4b0ef2470c4979e345e4e0cc1bbac65fda11d0d7b789dbac035e4c6ce3f98adb", + "sha256:59e903ca800c8cfd1ebe482349ec7c35687b95e98cefae213e271c8c7fffa021", + "sha256:5abd653a23c35d980b332bc0431d39663b1709d64142e3652890df4c9b6970f6", + "sha256:5f9403af9c790cc18411ea398a6950ee2def2a830ad0cfe6dc9122e6d528b302", + "sha256:6b4a8fd632b4ebee28282a9fef4c341835a1aa8671e2770b6f89adc8e8c2703c", + "sha256:6c1aca8231625115104a06e4389fcd9ec88f0c9befbabd80dc206c35561be271", + "sha256:795e91a60f291e75de2e20e6bdd67770f793c8605b553cb6e4387ce0cb302e09", + "sha256:7ba0ba61252ab23052e642abdb17fd08fdcfdbbf3b74c969a30c58ac1ade7cd3", + "sha256:7c9401e68730d6c4245b8e361d3d13e1035cbc94db86b49dc7da8bec235d0015", + "sha256:81f812d8f5e8a09b246515fac141e9d10113229bc33ea073fec11403b016bcf3", + "sha256:895d54c0ddc78a478c80f9c438579ac15f3e27bf442c2a9aa74d41d0e4d12544", + "sha256:8de332053707c80963b589b22f8e0229f1be1f3ca862a932c1bcd48dafb18dd8", + "sha256:92c882b70a40c79de9f5294dc99390671e07fc0b0113d472cbea3fde15db1792", + "sha256:95edb1ed513e68bddc2aee3de66ceaf743590bf16c023fb9977adc4be15bd3f0", + "sha256:b63d4ff734263ae4ce6593798bcfee6dbfb00523c82753a3a03cbc05555a9cc3", + "sha256:bd7bf289e05470b1bc74889d1466d9ad4a56d201f24397557b6f65c24a6844b8", + "sha256:cc3ea6b23954da84dbee8025c616040d9aa5eaf34ea6895a0a762ee9d3e12e11", + "sha256:cc9ec588c6ef3a1325fa032ec14d97b7309db493782ea8c304666fb10c3bd9a7", + "sha256:d3d07c86d4efa1facdf32aa878bd508c0dc4f87c48125cc16b937baa4e5b5e11", + "sha256:d8a96747df78cda35980905bf26e72960cba6d355ace4780d4bdde3b217cdf1e", + "sha256:e38d58d9138ef972fceb7aeec4be02e3f01d383723965bfcef14d174c8ccd039", + "sha256:eb472586374dc66b31e36e14720747595c2b265ae962987261f044e5cce644b5", + "sha256:fbd922f702582cb0d71ef94442bfca57624352622d75e3be7a1e7e9360b07e72" + ], + "version": "==8.0.1" + }, + "pyparsing": { + "hashes": [ + "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", + "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" + ], + "version": "==2.4.7" + }, + "python-dateutil": { + "hashes": [ + "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", + "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a" + ], + "index": "pypi", + "version": "==2.8.1" + }, + "pytz": { + "hashes": [ + "sha256:3e6b7dd2d1e0a59084bcee14a17af60c5c562cdc16d828e8eba2e683d3a7e268", + 
"sha256:5c55e189b682d420be27c6995ba6edce0c0a77dd67bfbe2ae6607134d5851ffd" + ], + "version": "==2020.4" + }, + "six": { + "hashes": [ + "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", + "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" + ], + "version": "==1.15.0" + }, + "typing-extensions": { + "hashes": [ + "sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918", + "sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c", + "sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f" + ], + "version": "==3.7.4.3" + }, + "yarl": { + "hashes": [ + "sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e", + "sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434", + "sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366", + "sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3", + "sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec", + "sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959", + "sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e", + "sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c", + "sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6", + "sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a", + "sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6", + "sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424", + "sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e", + "sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f", + "sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50", + "sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2", + "sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc", + "sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4", + "sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970", + "sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10", + "sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0", + "sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406", + "sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896", + "sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643", + "sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721", + "sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478", + "sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724", + "sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e", + "sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8", + "sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96", + "sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25", + "sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76", + "sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2", + "sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2", + "sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c", + "sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a", + "sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71" + ], + "version": "==1.6.3" + } + } } diff --git 
a/docs/manuals/admin/telemetry.md b/docs/manuals/admin/telemetry.md
new file mode 100644
index 00000000..719905b5
--- /dev/null
+++ b/docs/manuals/admin/telemetry.md
@@ -0,0 +1,199 @@
+# Telemetry
+
+QuantumLeap ships with a telemetry component for concurrent, low memory
+footprint, efficient collection of time-varying quantities. Presently,
+it is possible to collect:
+
+* Duration of selected code blocks;
+* Python garbage collection metrics;
+* Python profiler (cProfile) data;
+* Operating system resource usage: maximum resident set size, user and
+  kernel time.
+
+Profiler data is collected in files that can be loaded into the Python
+built-in analyser (`pstats`), whereas all other sampled quantities are
+assembled into time series and output to CSV files which can be easily
+imported into data analysis tools such as Pandas or a time series database
+such as Crate or Timescale.
+
+
+### Output files
+
+As QuantumLeap collects telemetry data, files will be written to a
+monitoring directory of your choice. Duration time series are output
+to CSV files having a "duration" prefix and "csv" extension. Likewise,
+garbage collection and operating system resource usage time series are
+collected in CSV files having a prefix of "runtime" and an extension
+of "csv". Finally, profiler data go into files named
+"profiler.PID.data", where PID is the operating system PID of the process
+being profiled---e.g. "profiler.5662.data". CSV files can be read and
+deleted at will without interfering with QuantumLeap's telemetry collection
+process, even if QuantumLeap is restarted multiple times. On the other
+hand, profiler data files should only be opened after stopping QuantumLeap.
+(These files are produced by cProfile, not by QuantumLeap, so it is best
+not to touch them until cProfile exits.)
+
+
+### Output format
+
+The profiler data files are binary files in the cProfile format as
+documented in the Python standard library; hence they will not be
+discussed here. The CSV files contain time series data, and fields
+are arranged as follows:
+
+- **Timepoint**: time at which the measurement was taken, expressed
+  as the number of nanoseconds since the epoch. (Integer value.)
+- **Measurement**: sampled quantity. (Float value.)
+- **Label**: name used to identify a particular kind of measurement
+  when sampling. (String value.)
+- **PID**: operating system ID of the process that sampled the quantity.
+
+For convenience, each CSV file starts with the header:
+
+    Timepoint, Measurement, Label, PID
+
+For duration files, the sampled quantity is the amount of time, in
+fractional seconds, that an HTTP request took to complete, and the
+label identifies that request using a combination of path and verb,
+as shown in the duration file excerpt below:
+
+    Timepoint, Measurement, Label, PID
+    ...
+    1607092101580206000, 0.237, "/v2/notify [POST]", 5659
+    ...
+    1607092101580275000, 0.291, "/v2/notify [POST]", 5662
+    ...
+
+Runtime files contain both Python garbage collection and operating
+system resource usage time series. Labels and measurements are as
+follows:
+
+- **GC collections**. Each measurement in the series represents the total
+  number of times the GC collector swept memory since the interpreter
+  was started. (This is the total across all generations.) The series
+  is labelled with "gc collections".
+- **GC collected**. Each measurement in the series represents the total
+  number of objects the GC collector freed since the interpreter was
+  started. (This is the total across all generations.)
+  The series is labelled with "gc collected".
+- **GC uncollectable**. Each measurement in the series represents the
+  total number of objects the GC collector couldn't free since the
+  interpreter was started. (This is the total across all generations.)
+  The series is labelled with "gc uncollectable".
+- **User Time**. Each measurement in the series is the total amount of
+  time, in seconds, the process spent executing in user mode. The
+  series is labelled with "user time".
+- **System Time**. Each measurement in the series is the total amount of
+  time, in seconds, the process spent executing in kernel mode. The
+  series is labelled with "system time".
+- **Maximum RSS**. Each measurement in the series is the maximum resident
+  set size used. The value will be in kilobytes on Linux and bytes on MacOS.
+  The series is labelled with "max rss".
+
+
+### Basic usage
+
+Telemetry is turned off by default but can easily be switched on using
+the Gunicorn configuration file provided in the `server` package:
+`gconfig_telemetry.py`. With this configuration, QuantumLeap will collect:
+
+* The duration of each HTTP request;
+* Python garbage collection metrics;
+* Operating system resource usage: maximum resident set size, user and
+  kernel time.
+
+If profiling data are needed too, edit `gconfig_telemetry.py` to enable
+Python's built-in profiler (cProfile):
+
+    def post_worker_init(worker):
+        ...
+        monitor.start(monitoring_dir=monitoring_dir,
+                      with_runtime=True,
+                      with_profiler=False)
+                      # ^ set this to True
+
+By default telemetry data are written to files in the `_monitoring`
+directory under QuantumLeap's current working directory---if the directory
+doesn't exist, it is automatically created. To choose a different location,
+set the `monitoring_dir` variable in `gconfig_telemetry.py` to your liking.
+
+#### Turning telemetry on
+As mentioned earlier, telemetry is turned off by default. To turn it on,
+start QuantumLeap this way:
+
+    $ python app.py --config server/gconfig_telemetry.py
+
+or, to use your own Gunicorn instead of QuantumLeap's embedded one:
+
+    $ gunicorn server.wsgi --config server/gconfig_telemetry.py
+
+If you are using the Docker image, pass the telemetry configuration
+as a command argument, as in the Docker Compose snippet below:
+
+    quantumleap:
+        image: smartsdk/quantumleap:latest
+        command: --config server/gconfig_telemetry.py
+        ...
+
+At the moment the only way to turn telemetry off is to stop QuantumLeap
+and then restart it with its default configuration---i.e. `gconfig.py`.
+
+#### Analysing telemetry data
+Profiler data can be analysed interactively using the Python `pstats`
+module as explained in the Python standard library documentation, e.g.:
+
+    $ python -m pstats profiler.5662.data
+
+CSV files can easily be imported into data analysis tools such as Pandas,
+or into a time series database such as Crate or Timescale using the
+`COPY FROM` statement. For added convenience, there is a `pandas_import`
+module in the `telemetry` package that you can use to import all duration
+and runtime CSV files found in the monitoring directory:
+
+    $ cd ngsi-timeseries-api
+    $ pipenv install --dev
+    $ python
+    >>> import pandas as pd
+    >>> from server.telemetry.pandas_import import TelemetryDB
+    >>>
+    >>> db = TelemetryDB('/path/to/_monitoring')
+
+Then you can use the `TelemetryDB` methods to populate Pandas frames
+with duration and runtime data combed from the CSV files.
+For example, here's how to calculate requests per second statistics for
+the version endpoint and plot requests per second over time:
+
+    >>> get_version = db.duration().time_series('/version [GET]')
+    >>> rps = get_version.data().resample('1S').count()
+    >>> rps.describe()
+    ...
+    >>> fig = rps.plot().get_figure()
+    >>> fig.savefig("get-version-rps.pdf")
+
+For further inspiration, you can have a look at the `analysis` module
+in the `tests/benchmark` directory.
+
+
+### Advanced usage
+
+Power users who need to instrument the code to investigate performance
+bottlenecks can do so by decorating functions with a duration sampler,
+as in the example below, where a `time_it` decorator is added to the
+version endpoint's handler:
+
+    from server.telemetry.monitor import time_it
+
+    @time_it(label='version()')
+    def version():
+        ...
+
+It is also possible to time specific blocks of code inside functions
+or methods, or in the outer module's scope; please refer to the
+documentation of the `monitor` module for the details.
+
+For more advanced scenarios, or for writing your own samplers, first
+familiarise yourself with the `observation` module (core functionality,
+it comes with numerous examples), then have a look at the samplers in
+the `sampler` module to see how to write one. Finally, you can use the
+implementation of the `monitor` module as a starting point for wiring
+together the building blocks to make them fit for your use case.
diff --git a/mkdocs.yml b/mkdocs.yml
index d154af3f..dbe8d4ff 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -23,3 +23,4 @@ nav:
   - 'Grafana': 'admin/grafana.md'
   - 'Data-Migration': 'admin/dataMigration.md'
   - 'Bechmarks': 'admin/benchmarks.md'
+  - 'Telemetry': 'admin/telemetry.md'
diff --git a/src/server/gconfig_telemetry.py b/src/server/gconfig_telemetry.py
new file mode 100644
index 00000000..2d85a2ed
--- /dev/null
+++ b/src/server/gconfig_telemetry.py
@@ -0,0 +1,34 @@
+import multiprocessing
+import os
+import server
+import server.telemetry.monitor as monitor
+
+
+bind = f"{server.DEFAULT_HOST}:{server.DEFAULT_PORT}"
+workers = multiprocessing.cpu_count() * 4 + 1
+worker_class = 'gthread'
+threads = 1
+loglevel = 'error'
+
+
+monitoring_dir = '_monitoring'
+
+
+def post_worker_init(worker):
+    os.makedirs(monitoring_dir, exist_ok=True)
+    monitor.start(monitoring_dir=monitoring_dir,
+                  with_runtime=True,
+                  with_profiler=False)
+
+
+def pre_request(worker, req):
+    req.duration_sample_id = monitor.start_duration_sample()
+
+
+def post_request(worker, req, environ, resp):
+    key = f"{req.path} [{req.method}]"
+    monitor.stop_duration_sample(key, req.duration_sample_id)
+
+
+def worker_exit(servo, worker):
+    monitor.stop()
diff --git a/src/server/telemetry/__init__.py b/src/server/telemetry/__init__.py
new file mode 100644
index 00000000..07357a70
--- /dev/null
+++ b/src/server/telemetry/__init__.py
@@ -0,0 +1,14 @@
+"""
+Thread-safe, low memory footprint, and efficient collection of time-varying
+quantities.
+
+For common telemetry scenarios (timing, profiling, GC) you should just be
+able to use the ``monitor`` module as is. See there for details and usage.
+ +For more advanced scenarios or writing your own samplers, familiarise +yourself with the ``observation`` module (core functionality, comes with +lots of examples) first, then have a look at the samplers in the ``sampler`` +module to see how to write one, finally you can use the implementation of +the ``monitor`` module as a starting point for wiring together the building +blocks to make them fit for your use case. +""" diff --git a/src/server/telemetry/flush.py b/src/server/telemetry/flush.py new file mode 100644 index 00000000..e02d59e8 --- /dev/null +++ b/src/server/telemetry/flush.py @@ -0,0 +1,110 @@ +""" +Flushing of time series memory buffers to permanent storage. +Each buffer is saved to its own file to avoid race conditions among processes +and threads. Saving of data is efficient since it's based on streams and +lock-free, i.e. there's no need to acquire global locks to coordinate +writers. Files are written to a configured target directory atomically and +with unique names. This avoids interference with other programs that may be +processing previously written files. For example, another program can safely +scan the directory, aggregate data in each file, process the aggregate and +then delete the processed files with no risk of race conditions w/r/t the +writers in this module. +""" + +import csv +import os +from uuid import uuid4 + +from server.telemetry.observation import ObservationStore, \ + ObservationStoreAction, tabulate + + +TIMEPOINT_CSV_FIELD = 'Timepoint' +""" +Name of the observation's timepoint field in the CSV header. +""" +MEASUREMENT_CSV_FIELD = 'Measurement' +""" +Name of the observation's measurement field in the CSV header. +""" +LABEL_CSV_FIELD = 'Label' +""" +Name of the observation's label field in the CSV header. +""" +PID_CSV_FIELD = 'PID' +""" +Name of the PID field in the CSV header. +""" + +OBSERVATION_STORE_HEADER = [TIMEPOINT_CSV_FIELD, MEASUREMENT_CSV_FIELD, + LABEL_CSV_FIELD, PID_CSV_FIELD] +""" +Header of the CSV file where observation store contents get written. +""" + + +def flush_to_csv(target_dir: str, filename_prefix: str) \ + -> ObservationStoreAction: + """ + Build an action to stream the contents of an observation store to a CSV + file. Write the file *atomically* to the specified target directory and + with a unique file name. Write CSV fields in this order: time point, + measurement, label, PID. Notice PID is the process ID of the current + process which isn't part of the observation store but is added by this + function to each row. + + :param target_dir: the directory where to write the file. + :param filename_prefix: a string to prepend to the generated unique file + name. + :return: a function that takes an observation store and writes its contents + to file. + """ + return lambda store: _save_csv(target_dir, filename_prefix, store) + + +def _save_csv(target_dir: str, filename_prefix: str, + store: ObservationStore): + temp_name, filename = _file_names(filename_prefix) + temp_path = os.path.join(target_dir, temp_name) # (*) + target_path = os.path.join(target_dir, filename) + + _write_csv(temp_path, store) + os.rename(temp_path, target_path) # (*) + + # NOTE. Atomic move. 
Rename is atomic but won't work across file systems, + # see + # - https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python/ + # If you try moving a file across file systems you get an error similar to: + # + # OSError: [Errno 18] Cross-device link: + # '/tmp/file.csv' -> '/dir/on/other/fs/file.csv' + # + # This is why we write the file directly to the target dir with a temp name + # and then do the move. In fact, putting the file in a temp dir and then + # moving it to the target dir may fail if the two dirs are on different + # file systems. + + +def _file_names(filename_prefix: str) -> (str, str): + fid = uuid4().hex + temp_name = f"{filename_prefix}.{fid}.tmp" + target_name = f"{filename_prefix}.{fid}.csv" + return temp_name, target_name + + +def _write_csv(path: str, content: ObservationStore): + pid = os.getpid() + ts = ((t, m, k, pid) for t, m, k in tabulate(content)) # (1) + with open(path, mode='w') as fd: + w = csv.writer(fd, delimiter=',', quotechar='"', + quoting=csv.QUOTE_MINIMAL) # (2) + w.writerow(OBSERVATION_STORE_HEADER) + w.writerows(ts) + + # NOTES. + # 1. Lazy evaluation. Parens, contrary to square brackets, don't force + # evaluation, so we won't wind up with double the memory of the store. + # See: + # - https://stackoverflow.com/questions/18883414 + # 2. CSV quoting. Only quoting fields if they contain a delimiter or the + # quote char. diff --git a/src/server/telemetry/monitor.py b/src/server/telemetry/monitor.py new file mode 100644 index 00000000..2bd7bba7 --- /dev/null +++ b/src/server/telemetry/monitor.py @@ -0,0 +1,330 @@ +""" +Description +----------- +Convenience module to provide a simple interface for common monitoring +scenarios. Using this module you can easily: + +* time the duration of selected code blocks; +* turn on the Python built-in profiler (cProfile); +* gather garbage collection and OS resource usage metrics. + +Duration, GC and OS measurements are assembled in time series. Every time +you sample a duration, a corresponding measurement is added to an underlying +duration series at the current time point. GC and OS metrics, if enabled, +work similarly, except they're automatically gathered in a background thread +every second. Notice we use a nanosecond-resolution, high-precision timer +under the bonnet. Time series data are collected in a memory buffer which +gets flushed to file as soon as the buffer fills. Files are written to a +directory of your choice. + + +Concurrency and performance +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The whole data collection process, from memory to file, is designed to be +efficient and have a low memory footprint in order to minimise impact on +the runtime performance of the process being monitored and guarantee accurate +measurements. At the same time, it is thread-safe and can even be used by +multiple processes simultaneously, e.g. a Gunicorn WSGI app configured with +a pre-forked server model. (See the documentation of the ``flush`` module +about parallel reader/writer processes.) + +As a frame of reference, average overhead for collecting a duration sample +is 0.31 ms. Memory gets capped at 1 MiB as noted below. (You can use the +overhead gauge script in the tests directory to experiment yourself.) + + +Output files +^^^^^^^^^^^^ +Time series data are collected in a memory buffer which gets flushed to +file when the buffer's memory grows bigger than 1 MiB. 
Files are written to +a directory of your choice with file names having the following prefixes: +the value of ``DURATION_FILE_PREFIX`` for duration series, the value of +``RUNTIME_FILE_PREFIX`` for GC & OS metrics, and ``PROFILER_FILE_PREFIX`` +for profiler data. The file format is CSV and fields are arranged as +follows: + +* **Timepoint**: time at which the measurement was taken, expressed as number + of nanoseconds since the epoch. (Integer value.) +* **Measurement**: sampled quantity. (Float value.) +* **Label**: name used to identify a particular kind of measurement when + sampling. (String value.) +* **PID**: operating system ID of the process that sampled the quantity. + +Out of convenience, the CSV file starts with a header of: ``Timepoint, +Measurement, Label, PID``. + + +Usage +^^^^^ +You start a monitoring session with a call to the ``start`` function +:: + import server.telemetry.monitor as monitor + + monitor.start(monitoring_dir='/my/output/', # where to output files + with_runtime=True, # enable GC & OS sampling + with_profiler=False) # but keep profiler off + +This function should be called exactly once, so it's best to call it from +the main thread when the process starts. There's also a ``stop`` function +you should call just before the process exits to make sure all memory buffers +get flushed to file: +:: + monitor.stop() + +This function too should be called exactly once. With set-up and tear-down +out of the way, let's have a look at how to time a code block: +:: + sample_id = monitor.start_duration_sample() # start timer + try: + # do stuff + finally: + key = 'my code block id' # unique name for this series + monitor.stop_duration_sample(key, sample_id) # stop timer + +Now every time this block of code gets hit, a new duration sample ends up +in the "my code block id" series. If you later open up the duration file +where the series gets saved, you should be able to see something similar +to +:: + Timepoint, Measurement, Label, PID + ... + 1607092101580206000, 0.023, "my code block id", 5662 + ... + 1607092101580275000, 0.029, "my code block id", 5662 + ... + +Timing your code with ``try/finally`` clauses like we did earlier is +quite verbose, so we have a context decorator you can use to get rid +of the boilerplate. This code snippet is equivalent to the one we saw +earlier: +:: + from server.telemetry.monitor import time_it + + with time_it(label='my code block id'): + # do stuff + +``time_it`` wraps the code block following the ``with`` statement to +run the same timing instructions we wired in manually earlier. And it +works with functions too: +:: + @time_it(label='my_func') + def my_func(): + # do stuff + +Notice when we called the ``start`` method earlier, we turned on +collection of runtime metrics by passing in: ``with_runtime=True``. +With runtime metrics collection enabled, a background +thread gathers GC and OS data (CPU time, memory, etc.) as detailed in +the documentation of ``GCSampler`` and ``ProcSampler``. Another thing +you can do is turn on the profiler when calling the ``start`` function. +In that case, when the process exits you'll have a profile data file +you can import into the Python profiler's stats console, e.g. +:: + python -m pstats profiler.5662.data # 5662 is the process' PID + +Finally, here's a real-world example of using this module with Gunicorn +to record the duration of each HTTP request in time series (one series +for each combination of path and verb) as well as GC and OS metrics. 
+To try it out yourself, start Gunicorn with a config file containing +:: + import os + import server.telemetry.monitor as monitor + + + bind = "0.0.0.0:8080" + workers = 2 # pre-fork model (two processes) + worker_class = 'gthread' # with threads, + threads = 2 # two of them for each process + loglevel = 'debug' + + + monitoring_dir = '_monitoring' # output files go in ./_monitoring + + # init monitoring with duration and runtime samplers just after Gunicorn + # forks the worker process. + def post_worker_init(worker): + os.makedirs(monitoring_dir, exist_ok=True) + monitor.start(monitoring_dir=monitoring_dir, + with_runtime=True, + with_profiler=False) + + # start the request timer just before Gunicorn hands off the request + # to the WSGI app; stash away the sample ID in the request object so + # we can use it later. + def pre_request(worker, req): + req.duration_sample_id = monitor.start_duration_sample() + + # stop the request timer as soon as the WSGI app is done with the + # request; record request duration in a time series named by combining + # HTTP path and verb. + def post_request(worker, req, environ, resp): + key = f"{req.path} [{req.method}]" + monitor.stop_duration_sample(key, req.duration_sample_id) + + # flush any left over time series data still buffered in memory just + # before the process exits. + def worker_exit(server, worker): + monitor.stop() + +""" + +from contextlib import ContextDecorator +from cProfile import Profile +import os +from threading import Lock +from typing import Optional + +from server.telemetry.observation import ObservationBucket +from server.telemetry.flush import flush_to_csv +from server.telemetry.sampler import DurationSampler, RuntimeBackgroundSampler + + +DURATION_FILE_PREFIX = 'duration' +RUNTIME_FILE_PREFIX = 'runtime' +PROFILER_FILE_PREFIX = 'profiler' + + +def _new_bucket(monitoring_dir: str, prefix: str) -> ObservationBucket: + return ObservationBucket( + empty_action=flush_to_csv( + target_dir=monitoring_dir, filename_prefix=prefix) + ) + + +def _new_duration_sampler(monitoring_dir: str) -> DurationSampler: + bucket = _new_bucket(monitoring_dir, DURATION_FILE_PREFIX) + return DurationSampler(bucket) + + +def _start_runtime_sampler(monitoring_dir: str) -> ObservationBucket: + bucket = _new_bucket(monitoring_dir, RUNTIME_FILE_PREFIX) + RuntimeBackgroundSampler(bucket).spawn() # (*) + return bucket +# NOTE. Safe empty action. You can only ever use ``RuntimeBackgroundSampler`` +# if the bucket's action can be killed at random without wreaking havoc. +# Since ``flush_to_csv`` writes files to the monitoring dir atomically, we +# can safely do this: all that can happen is that a tiny amount of data still +# in the buffer doesn't get written to the monitoring dir. The amount if any +# will be small b/c we call the bucket's empty method just before quitting, +# see what the stop method below does. 
+ + +def _profiler_file_pathname(monitoring_dir: str) -> str: + pid = os.getpid() + file_name = f"{PROFILER_FILE_PREFIX}.{pid}.data" + return os.path.join(monitoring_dir, file_name) + + +class Monitor: + + def __init__(self, monitoring_dir: str, + with_runtime: bool = False, + with_profiler: bool = False): + self._monitoring_dir = monitoring_dir + self._duration_sampler = _new_duration_sampler(monitoring_dir) + self._runtime_bucket = None + self._profiler = None + self._lock = Lock() + + if with_runtime: + self._runtime_bucket = _start_runtime_sampler(monitoring_dir) + + if with_profiler: + self._profiler = Profile() + self._profiler.enable() + + def start_duration_sample(self) -> str: + return self._duration_sampler.sample() + + def stop_duration_sample(self, label: str, sample_id: str): + self._duration_sampler.collect(label, sample_id) + + def stop(self): + if self._profiler: + with self._lock: + self._profiler.disable() + outfile = _profiler_file_pathname(self._monitoring_dir) + self._profiler.dump_stats(outfile) + + self._duration_sampler.bucket().empty() + if self._runtime_bucket: + self._runtime_bucket.empty() + + +_monitor: Optional[Monitor] = None + + +def start( + monitoring_dir: str, + with_runtime: bool = False, + with_profiler: bool = False): + """ + Create a process-wide singleton monitor object to collect time series. + You should call this function early, when the process starts and in the + main thread before spawning other threads. + + :param monitoring_dir: where to output time series and profiler data files. + :param with_runtime: enable gathering GC and OS-level data. + :param with_profiler: turn on profiler. + """ + global _monitor + _monitor = Monitor(monitoring_dir, with_runtime, with_profiler) + + # NOTE. Thread-safety. Not worth making this thread-safe. If people stick + # to the docs, then start gets called when there are no other threads than + # main. + + +def start_duration_sample() -> str: + """ + Start a duration sample. This is just a convenience wrapper around + ``DurationSampler.sample``, see there for usage. + """ + if _monitor: + return _monitor.start_duration_sample() + return '' + + +def stop_duration_sample(label: str, sample_id: str): + """ + End a duration sample. This is just a convenience wrapper around + ``DurationSampler.collect``, see there for usage. + """ + if _monitor: + _monitor.stop_duration_sample(label, sample_id) + + +def stop(): + """ + Ends the monitoring session. Call this just before the process exits. + The assumption is that this function gets called **exactly** once. + """ + if _monitor: + _monitor.stop() + + # NOTE. Thread-safety. Not worth making this thread-safe. If people stick + # to the docs, then there's no issues. + + +class time_it(ContextDecorator): + """ + Context decorator to time how long a function or a code block takes to run. + """ + + def __init__(self, label: str): + """ + Create a new instance. + + :param label: a name to identify the time series where durations + of the timed function or code block should be added. 
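+
+        Example (a sketch; the label below is just an illustration)::
+
+            with time_it(label='my code block id'):
+                ...  # timed code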
+ """ + self._label = label + self._sample_id: Optional[str] = None + + def __enter__(self): + self._sample_id = start_duration_sample() + return self + + def __exit__(self, *exc): + stop_duration_sample(self._label, self._sample_id) + return False diff --git a/src/server/telemetry/observation.py b/src/server/telemetry/observation.py new file mode 100644 index 00000000..02d948e8 --- /dev/null +++ b/src/server/telemetry/observation.py @@ -0,0 +1,425 @@ +""" +Thread-safe, low memory footprint, and efficient collection of time-varying +quantities. Time series data are collected in a memory buffer which gets +flushed to permanent storage when the buffer's memory grows bigger a than +a configurable threshold. +""" + +from sys import getsizeof +from threading import Lock +from time import time_ns +from typing import Callable, Dict, Generator, List, Tuple, Union + + +Observation = Tuple[int, float] +""" +A numeric observation at a time point ``t``: ``(t, measurement)``. +The first element is the time at which the measurement was taken and is +expressed as the number of nanoseconds since the epoch. The second element +is the actual quantity measured. +""" +# NOTE. Performance. Using a bare tuple instead of a class to keep memory +# footprint low. In fact, there could be quite a number of these little +# guys in memory during a sampling session... +# Unfortunately integers and floats aren't as light on memory as in other +# languages---e.g. an int is actually an object taking up at least 28 bytes +# on a 64-bit box. See: +# - https://pythonspeed.com/articles/python-integers-memory/ +# - https://stackoverflow.com/questions/10365624 +# +# NumPy would possibly have a much smaller footprint, but we'd like to keep +# this package self-contained so not to impose any external lib dependency +# on users. + +OBSERVATION_MIN_SZ = getsizeof((0, 0.0)) +""" +The least amount of memory, in bytes, an observation tuple can possibly +take up on this machine. Typically 56 bytes on a 64-bit architecture. +""" + +LabelledObservation = Tuple[int, float, str] +""" +An observation with a string label. Data are flattened in a triple +``(time, measurement, label)``. +""" + + +def when(data: Union[Observation, LabelledObservation]) -> int: + """ + The observation's time-point, in nanoseconds since the epoch. + + :param data: the observation data. + :return: the data's time-point component. + """ + return data[0] + + +def measured(data: Union[Observation, LabelledObservation]) -> float: + """ + The observation's measured quantity. + + :param data: the observation data. + :return: the data's measurement component. + """ + return data[1] + + +def named(data: LabelledObservation) -> str: + """ + The observation's label. + + :param data: the observation data. + :return: the data's label component. + """ + return data[2] + + +def _split_label(data: LabelledObservation) -> (str, Observation): + return data[2], data[0:2] + + +def observe(label: str, measurement: float) -> LabelledObservation: + """ + Record the given measurement was taken at this exact point in time. + + :param label: a unique name to identify measurements of this kind. + :param measurement: the measured numeric quantity. + :return: the corresponding ``LabelledObservation``. 
+ + Examples: + + >>> x = observe('temp', 75.3) + >>> named(x) + 'temp' + >>> measured(x) + 75.3 + >>> when(x) > 1606832514068067000 # 1 Dec 2020 + True + """ + return time_ns(), measurement, label + + +def observe_many(*labelled_measurements: (str, float)) \ + -> List[LabelledObservation]: + """ + Record the given measurements were taken at this exact point in time. + Each measurement comes labelled with a name to identify measurements + of a given kind. + + :param labelled_measurements: a list of ``(label, measurement)`` pairs. + :return: the corresponding list of ``LabelledObservation``. + + Examples: + + >>> xs = observe_many(('temp', 75.3), ('pressure', 19.2)) + >>> len(xs) == 2 + True + + >>> when(xs[0]) == when(xs[1]) + True + + >>> named(xs[0]) + 'temp' + >>> measured(xs[0]) + 75.3 + + >>> named(xs[1]) + 'pressure' + >>> measured(xs[1]) + 19.2 + """ + now = time_ns() + return [(now, d[1], d[0]) for d in labelled_measurements] + + +ObservationSeries = List[Observation] +""" +A time series of numeric measurements. +""" + +ObservationStore = Dict[str, ObservationSeries] +""" +A collection of labelled observation time series. Each label uniquely +identifies a time series. +""" +# NOTE. Memory footprint. It looks like that using a dict with list values +# shouldn't be too bad: +# - https://stackoverflow.com/questions/10264874 + + +def _extend_series(store: ObservationStore, label: str, obs: [Observation]): + series = store.get(label, []) + series.extend(obs) + store[label] = series + + # NOTE. Memory footprint. + # According to the interwebs, it isn't really worth your while + # pre-allocating a list with an initial capacity. I have my doubts + # about this though and the effect of append on GC---i.e. what if + # the list grows in too small chunks? Is there any data structure + # we could use? Simple benchmarks show that we shouldn't have an + # issue here, but I'd still like to figure out to what extent this + # affects GC and how to optimise. + + +def observation_store(*ts: LabelledObservation) -> ObservationStore: + """ + Build an observation store out of the given labelled observations. + + :param ts: the input data. + :return: the store with the input data. + + Examples: + + >>> observation_store() + {} + + >>> ts = observe('h', 3.2), observe('k', 1.0), observe('k', 1.1) + >>> store = observation_store(*ts) + >>> len(store) == 2 + True + + >>> [measured(x) for x in store['h']] + [3.2] + >>> [measured(x) for x in store['k']] + [1.0, 1.1] + """ + store = {} + for t in ts: + label, ob = _split_label(t) + _extend_series(store, label, [ob]) + + return store + + +def merge_observation_stores(*ts: ObservationStore) -> ObservationStore: + """ + Combine observation stores into one. This happens by using a monoidal + sum ``:+:`` defined on observation stores, so that if ``t1, t2, t3, ...`` + are the input stores the result is ``((t1 :+: t2) :+: t3) :+: ...)``. + The sum of two stores, ``a :+: b`` is defined as follows. Call ``K`` the + set of keys from two stores. The result table ``r`` has keys ``k ∈ K`` + and values ``r[k] = a.get(k, []) + b.get(k, [])``. + + :param ts: the observation stores to merge. + :return: an observation store with series merged from the input stores. 
+ + Examples: + + >>> merge_observation_stores() + {} + + >>> a_obs = observe_many(('k1', 1.0), ('k1', 2.0), ('k2', 3.0)) + >>> a = observation_store(*a_obs) + >>> b_obs = observe_many(('k2', 4.0), ('k3', 5.0)) + >>> b = observation_store(*b_obs) + + >>> m = merge_observation_stores(a, b) + + >>> [measured(obs) for obs in m['k1']] + [1.0, 2.0] + >>> [measured(obs) for obs in m['k2']] + [3.0, 4.0] + >>> [measured(obs) for obs in m['k3']] + [5.0] + """ + merged = {} + for t in ts: + for k in t: + _extend_series(merged, k, t[k]) + + return merged + # NOTE. Efficient algo. This function is only used for examples and testing + # so performance isn't really critical. But it'd be nice to implement an + # efficient algorithm. + + +def tabulate(store: ObservationStore) -> \ + Generator[LabelledObservation, None, None]: + """ + Stream the content of an observation store as a sequence of labelled + observations. + + :param store: the data source. + :return: a stream of labelled observations. + + Examples: + + >>> obs = observe_many(('k1', 1.0), ('k1', 2.0), ('k2', 3.0)) + >>> store = observation_store(*obs) + >>> [(k, m) for (t, m, k) in tabulate(store)] + [('k1', 1.0), ('k1', 2.0), ('k2', 3.0)] + """ + for k in store: + for ob in store[k]: + yield when(ob), measured(ob), k + + +class ObservationBuffer: + """ + Buffers observation series in an observation store. + + Examples: + + >>> buf = ObservationBuffer() + >>> buf.insert(observe('h', 1.0)) + >>> buf.insert(observe('k', 0.1)) + >>> buf.insert(observe('h', 2.0)) + >>> buf.size() + 2 + + >>> tot_bytes = buf.estimate_memory_lower_bound() + >>> 150 < tot_bytes < 200 + True + + >>> store = buf.flush() + >>> buf.size() == 0 + True + >>> len(store) == 2 + True + + >>> [measured(ob) for ob in store['h']] + [1.0, 2.0] + + """ + + def __init__(self): + """ + Create a new instance with an empty observation store. + """ + self._store: ObservationStore = {} + self._memory_lower_bound = 0 + + def size(self) -> int: + """ + :return: number of rows in the store which is the same as the number + of observation series. + """ + return len(self._store) + + def estimate_memory_lower_bound(self) -> int: + """ + :return: the minimum number of bytes the observation store can possibly + take up in memory. + """ + return self._memory_lower_bound + + def insert(self, *ts: LabelledObservation): + """ + Append each labelled observation ``t`` to the series identified by + ``t``'s label, automatically creating a new series if the label isn't + present. + + :param ts: the labelled observations to append. + """ + for t in ts: + label, ob = _split_label(t) + _extend_series(self._store, label, [ob]) + + self._memory_lower_bound += OBSERVATION_MIN_SZ * len(ts) + + def flush(self) -> ObservationStore: + """ + Discard the observation store. + + :return: the observation store just discarded. + """ + s = self._store + self._store = {} + return s + + +ObservationStoreAction = Callable[[ObservationStore], None] +""" +A function that does something with the input observation store. +""" + + +class ObservationBucket: + """ + Wraps an observation store to make it thread-safe and periodically + empty it to reclaim memory. It makes the store work like a (memory) + bucket, when the bucket is full, it gets emptied into a sink, an + ``ObservationStoreAction`` that takes the store's content and saves + it away from memory. + + Examples: + + Create a bucket with an action to print the measured values for + key "k". 
Set memory threshold to 0 to force calling the action + on every write to the underlying observation store. + + >>> def print_it(store): \ + print([measured(v) for v in store.get('k',[])]) + >>> bkt = ObservationBucket(empty_action=print_it, memory_threshold=0) + + Do some sampling. + + >>> bkt.put(*observe_many(('k', 1.0), ('k', 2.0))) + [1.0, 2.0] + + >>> bkt.put(observe('k', 3.0)) + [3.0] + + Call the empty method when done sampling to make sure any left over + data gets passed to the empty action which can then store it away. + + >>> bkt.empty() + [] + """ + + def __init__(self, + empty_action: ObservationStoreAction, + memory_threshold: int = 1 * 2**20 + ): + """ + Create a new instance. + + :param empty_action: a function to store the data collected so far. + It takes a single ``ObservationStore`` and returns nothing. Called + when the ``memory_threshold`` is reached or the ``empty`` method + is called. + :param memory_threshold: the amount of bytes past which the store + gets emptied. When the memory size of the observation store grows + bigger than this value, the empty action is automatically invoked. + Defaults to 1MiB. + """ + self._buffer = ObservationBuffer() + self._lock = Lock() + self._empty_action = empty_action + self._memory_threshold = memory_threshold + + def put(self, *ts: LabelledObservation): + """ + Put data into the bucket by calling the underlying store's ``insert`` + method to update observations. + + :param ts: the labelled observations to append to the existing series. + """ + flushed_store = None + with self._lock: # (1) + self._buffer.insert(*ts) + + mem = self._buffer.estimate_memory_lower_bound() + if mem > self._memory_threshold: + flushed_store = self._buffer.flush() + + if flushed_store: + self._empty_action(flushed_store) # (2) + + # NOTES. + # 1. Locking. It shouldn't affect performance adversely since typically + # ``put`` gets called after servicing the HTTP request. Also if threads + # compete for the lock, that time won't be reflected in the samples. + # 2. Confinement. We use a confinement strategy to avoid locking while + # the empty action runs. This is safe since we let go of the only ref + # to the store and passed it on to the empty action. + + def empty(self): + """ + Empty the bucket. Wipe clean the underlying store but pass the data + on to the empty action so that it can be stored away from memory. + """ + with self._lock: + store = self._buffer.flush() + self._empty_action(store) diff --git a/src/server/telemetry/pandas_import.py b/src/server/telemetry/pandas_import.py new file mode 100644 index 00000000..76b99c94 --- /dev/null +++ b/src/server/telemetry/pandas_import.py @@ -0,0 +1,170 @@ +""" +Utilities to import CSV telemetry data into Pandas data frames and series. 
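+
+For instance, a typical interactive session looks like the sketch below,
+where the ``_monitoring`` directory and the ``/version [GET]`` label are
+just examples of what a monitoring run may produce
+::
+    from server.telemetry.pandas_import import TelemetryDB
+
+    db = TelemetryDB('_monitoring')
+    get_version = db.duration().time_series('/version [GET]')
+    get_version.data().describe()      # request duration summary stats
+    db.max_rss().data().plot()         # plot max RSS over time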
+""" + +from glob import glob +import os +from typing import Optional + +import pandas as pd + +from server.telemetry.flush import TIMEPOINT_CSV_FIELD, \ + MEASUREMENT_CSV_FIELD, LABEL_CSV_FIELD, PID_CSV_FIELD +from server.telemetry.monitor import DURATION_FILE_PREFIX, RUNTIME_FILE_PREFIX +from server.telemetry.sampler import GC_COLLECTIONS, GC_COLLECTED, \ + GC_UNCOLLECTABLE, PROC_MAX_RSS, PROC_SYSTEM_TIME, PROC_USER_TIME + + +def _csv_file_pattern(monitoring_dir: str, file_prefix: str) -> str: + return os.path.join(monitoring_dir, f"{file_prefix}*.csv") + + +def _duration_file_pattern(monitoring_dir: str) -> str: + return _csv_file_pattern(monitoring_dir, DURATION_FILE_PREFIX) + + +def _runtime_file_pattern(monitoring_dir: str) -> str: + return _csv_file_pattern(monitoring_dir, RUNTIME_FILE_PREFIX) + + +def _parse_csv(pathname: str) -> pd.DataFrame: + def from_epoch_ns(t): return pd.to_datetime(int(t), + unit='ns', origin='unix') + converters = { + TIMEPOINT_CSV_FIELD: from_epoch_ns + } + return pd.read_csv(pathname, converters=converters) + + +def _load_csv_files(pathname_pattern: str) -> pd.DataFrame: + path_names = glob(pathname_pattern) + frames = [_parse_csv(p) for p in path_names] + return pd.concat(frames, ignore_index=True) + + +class TelemetrySeries: + + @staticmethod + def form_telemetry_data(label: str, frame: pd.DataFrame) \ + -> 'TelemetrySeries': + cols = [TIMEPOINT_CSV_FIELD, MEASUREMENT_CSV_FIELD, PID_CSV_FIELD] + proj = frame[frame[LABEL_CSV_FIELD] == label][cols] + time_indexed_data = proj.set_index(TIMEPOINT_CSV_FIELD) + + return TelemetrySeries(label, time_indexed_data) + + def __init__(self, label: str, data: pd.DataFrame): + self._label = label + self._pids = [pid for pid in data[PID_CSV_FIELD].unique()] + self._data = data + + def label(self) -> str: + """ + :return: this series' identifier. + """ + return self._label + + def pids(self) -> [int]: + """ + :return: the PIDs of the processes that produced the measurements + in this series. + """ + return self._pids + + def data(self, by_pid: Optional[int] = None) -> pd.Series: + """ + Extract the telemetry ``ObservationSeries`` as a Pandas time series. + + :param by_pid: optional argument to extract only the data collected by + the process having the given PID. + :return: a Pandas time series with the observed measurements. + """ + if by_pid: + group = self._data[self._data['PID'] == by_pid] + return group[MEASUREMENT_CSV_FIELD] + else: + return self._data[MEASUREMENT_CSV_FIELD] + + +class TelemetryFrame: + """ + Holds a Pandas frame with telemetry data combined from CSV files. + """ + + def __init__(self, frame: pd.DataFrame): + self._frame = frame + self._labels = [lbl for lbl in frame[LABEL_CSV_FIELD].unique()] + + def labels(self) -> [str]: + """ + :return: the label (unique name) of each series in the data set. + """ + return self._labels + + def time_series(self, label: str) -> TelemetrySeries: + """ + Extract the time series identified by the given label. + + :param label: the (unique) name of the series in the data set. + :return: an object to access the series data. + """ + return TelemetrySeries.form_telemetry_data(label, self._frame) + + +class TelemetryDB: + """ + Convenience class to collect all duration and runtime telemetry data + found in the monitoring directory and make the data query-able through + Pandas frames and series. + """ + + @staticmethod + def from_duration_data(monitoring_dir: str) -> 'TelemetryFrame': + """ + Collect duration telemetry data into a Pandas frame. 
+ Import every duration CSV file in the specified monitoring directory + into a Pandas frame and then join all the frames into one. + + :param monitoring_dir: the directory containing telemetry data. + :return: the whole duration data set. + """ + data = _load_csv_files(_duration_file_pattern(monitoring_dir)) + return TelemetryFrame(data) + + @staticmethod + def from_runtime_data(monitoring_dir: str) -> 'TelemetryFrame': + """ + Collect runtime telemetry data into a Pandas frame. + Import every duration CSV file in the specified monitoring directory + into a Pandas frame and then join all the frames into one. + + :param monitoring_dir: the directory containing telemetry data. + :return: the whole runtime data set. + """ + data = _load_csv_files(_runtime_file_pattern(monitoring_dir)) + return TelemetryFrame(data) + + def __init__(self, monitoring_dir: str): + self._duration_frame = self.from_duration_data(monitoring_dir) + self._runtime_frame = self.from_runtime_data(monitoring_dir) + + def duration(self) -> TelemetryFrame: + return self._duration_frame + + def gc_collections(self) -> TelemetrySeries: + return self._runtime_frame.time_series(GC_COLLECTIONS) + + def gc_collected(self) -> TelemetrySeries: + return self._runtime_frame.time_series(GC_COLLECTED) + + def gc_uncollectable(self) -> TelemetrySeries: + return self._runtime_frame.time_series(GC_UNCOLLECTABLE) + + def max_rss(self) -> TelemetrySeries: + return self._runtime_frame.time_series(PROC_MAX_RSS) + + def system_time(self) -> TelemetrySeries: + return self._runtime_frame.time_series(PROC_SYSTEM_TIME) + + def user_time(self) -> TelemetrySeries: + return self._runtime_frame.time_series(PROC_USER_TIME) diff --git a/src/server/telemetry/sampler.py b/src/server/telemetry/sampler.py new file mode 100644 index 00000000..91c26dd9 --- /dev/null +++ b/src/server/telemetry/sampler.py @@ -0,0 +1,330 @@ +""" +Samplers to measure durations and garbage collection. +You instantiate a sampler with an ``ObservationBucket`` where sampled time +series get buffered. The bucket (memory buffer) is thread-safe and gets +automatically flushed to secondary storage when it fills up. +""" + +import gc +from resource import getrusage, RUSAGE_SELF +from threading import Thread +from time import perf_counter, sleep +from uuid import uuid4 + +from server.telemetry.observation import ObservationBucket, observe, \ + observe_many + + +class Timer: + """ + Thread-safe timer. + + Examples: + + >>> from time import sleep + >>> timer = Timer() + + >>> outer_timer_id = timer.start() + >>> sleep(0.1) + + >>> inner_timer_id = timer.start() + >>> sleep(0.1) + >>> inner_duration = timer.stop(inner_timer_id) + + >>> sleep(0.1) + >>> outer_duration = timer.stop(outer_timer_id) + + >>> outer_duration - inner_duration > 0.2 + True + """ + + def __init__(self): + """ + Create a new instance. + """ + self._timers = {} + + @staticmethod + def _new_timer_id() -> str: + timer_id = uuid4() + return timer_id.hex + + def start(self) -> str: + """ + Start a timer. + + :return: the timer ID. + """ + timer_id = self._new_timer_id() # unique, avoids race conditions. + self._timers[timer_id] = perf_counter() + + return timer_id + + def stop(self, timer_id) -> float: + """ + Stop a previously started timer and compute how much time has elapsed + since starting it. + + :param timer_id: the timer ID returned by the start call. + :return: time elapsed, in fractional seconds, from the start call. + """ + duration = perf_counter() - self._timers.pop(timer_id) + return duration + # NOTE. 
pop gets rid of the timer to keep memory footprint small + + +class DurationSampler: + """ + Samples durations, storing them in a given ``ObservationBucket``. + + Examples: + + >>> from time import sleep + >>> from server.telemetry.observation import measured + + # Create a bucket with an action to print the measured values for the + # key "k". Set memory threshold to 0 to force calling the action on + # every write to the underlying observation store. + >>> def print_it(store): \ + print([f"{measured(v):0.1}" for v in store.get('k',[])]) + >>> bkt = ObservationBucket(empty_action=print_it, memory_threshold=0) + + # Create a sampler with the above bucket as backend store. + >>> sampler = DurationSampler(bkt) + + >>> sample_id = sampler.sample() + >>> sleep(0.1) + >>> sampler.collect('k', sample_id) + ['0.1'] + + >>> sample_id = sampler.sample() + >>> sleep(0.2) + >>> sampler.collect('k', sample_id) + ['0.2'] + + # Call the empty method when done sampling to make sure any left over + # data gets passed to the empty action which can then store it away. + >>> sampler.bucket().empty() + [] + """ + + def __init__(self, bucket: ObservationBucket): + """ + Create a new instance. + + :param bucket: backend memory buffer where to store data. + """ + self._bucket = bucket + self._timer = Timer() + + def bucket(self) -> ObservationBucket: + """ + :return: backend memory buffer where data is stored. + """ + return self._bucket + + def sample(self) -> str: + """ + Start a duration sample. + + :return: the sample ID. + """ + return self._timer.start() + + def collect(self, key: str, sample_id: str): + """ + End the specified duration sample and add it to the samples identified + by the given key. + + :param key: identifies the duration series where the current sample + should be added. + :param sample_id: the sample ID as returned by the sample method when + the sample was started. + """ + duration = self._timer.stop(sample_id) + self._bucket.put(observe(key, duration)) + + +GC_COLLECTIONS = 'gc collections' +""" +Label for the series of total GC collections measured by the ``GCSampler``. +""" +GC_COLLECTED = 'gc collected' +""" +Label for the series of total GC collected items measured by the ``GCSampler``. +""" +GC_UNCOLLECTABLE = 'gc uncollectable' +""" +Label for the series of total GC "uncollectable" items measured by the +``GCSampler``. +""" + + +class GCSampler: + """ + Produces aggregate stats about Python garbage collection. + This class generates the three series below. + + **GC collections**. Each measurement in the series represents the total + number of times the GC collector swept memory since the interpreter was + started. (This is the total across all generations.) The series is labelled + with the value of ``GC_COLLECTIONS``. + + **GC collected**. Each measurement in the series represents the total + number of objects the GC collector freed since the interpreter was started. + (This is the total across all generations.) The series is labelled with + the value of ``GC_COLLECTED``. + + **GC uncollectable**. Each measurement in the series represents the total + number of objects the GC collector couldn't free since the interpreter was + started. (This is the total across all generations.) The series is labelled + with the value of ``GC_UNCOLLECTABLE``. + """ + + def __init__(self, bucket: ObservationBucket): + """ + Create a new instance. + + :param bucket: backend memory buffer where to store data. 
+ """ + self._bucket = bucket + + def bucket(self) -> ObservationBucket: + """ + :return: backend memory buffer where data is stored. + """ + return self._bucket + + def sample(self): + """ + Sample the GC, aggregate the data, and add them to the series. + """ + xs = gc.get_stats() + data = [(x['collections'], x['collected'], x['uncollectable']) + for x in xs] + total_collections, total_collected, total_uncollectable = 0, 0, 0 + for d in data: + total_collections += d[0] + total_collected += d[1] + total_uncollectable += d[2] + + ys = observe_many((GC_COLLECTIONS, total_collections), + (GC_COLLECTED, total_collected), + (GC_UNCOLLECTABLE, total_uncollectable)) + + self._bucket.put(*ys) + + +PROC_USER_TIME = 'user time' +""" +Label for the user time series produced by the ``ProcSampler``. +""" +PROC_SYSTEM_TIME = 'system time' +""" +Label for the system time series produced by the ``ProcSampler``. +""" +PROC_MAX_RSS = 'max rss' +""" +Label for the maximum RSS series produced by the ``ProcSampler``. +""" + + +class ProcSampler: + """ + Collects OS resource usage data about this running process. + This class generates the three series below. + + **User Time**. Each measurement in the series is the total amount of + time, in seconds, the process spent executing in user mode. The series + is labelled with the value of ``PROC_USER_TIME``. + + **System Time**. Each measurement in the series is the total amount of + time, in seconds, the process spent executing in kernel mode. The series + is labelled with the value of ``PROC_SYSTEM_TIME``. + + **Maximum RSS**. Each measurement in the series is maximum resident set + size used. The value will be in kilobytes on Linux and bytes on MacOS. + The series is labelled with the value of ``PROC_MAX_RSS``. + """ + + def __init__(self, bucket: ObservationBucket): + """ + Create a new instance. + + :param bucket: backend memory buffer where to store data. + """ + self._bucket = bucket + + def bucket(self) -> ObservationBucket: + """ + :return: backend memory buffer where data is stored. + """ + return self._bucket + + def sample(self): + """ + Probe process user time, system (kernel) time, maximum RSS and add + these values to their respective series. + """ + try: + os_data = getrusage(RUSAGE_SELF) + xs = observe_many((PROC_USER_TIME, os_data.ru_utime), + (PROC_SYSTEM_TIME, os_data.ru_stime), + (PROC_MAX_RSS, os_data.ru_maxrss)) + self._bucket.put(*xs) + except (OSError, AttributeError): # AttributeError if os_data is None + return None + + +class RuntimeBackgroundSampler: + """ + Convenience class to sample GC and OS metrics at regular intervals in a + background daemon thread. + The thread goes on forever until the program exits, calling ``GCSampler`` + and ``ProcSampler`` every ``sampling_interval`` seconds to collect GC and + OS-level metrics using a bucket you specify. + Just before the program exits, you should call the bucket's ``empty`` + method to make sure any left over sampled data still in the memory buffer + gets processed by the bucket's empty action. + + Usage pattern: + :: + # at process start up + bucket = ObservationBucket(...) + RuntimeBackgroundSampler(bucket).spawn() + + # background thread collects data... + + # just before the process exits + bucket.empty() + + Convenient, but not very flexible: there's no way to stop the background + thread and the thread dies abruptly when the program exits. This means + ``RuntimeBackgroundSampler`` isn't suitable for buckets with empty actions + that should't be killed at random. 
+    """
+
+    def __init__(self, bucket: ObservationBucket,
+                 sampling_interval: float = 1.0):
+        self._gc_sampler = GCSampler(bucket)
+        self._proc_sampler = ProcSampler(bucket)
+        self._interval = sampling_interval
+
+    def _run(self):
+        while True:
+            self._gc_sampler.sample()
+            self._proc_sampler.sample()
+            sleep(self._interval)
+
+    def spawn(self):
+        """
+        Start the background sampling thread.
+        """
+        t = Thread(target=self._run, args=())
+        t.daemon = True  # (*)
+        t.start()
+
+    # NOTE. Daemon thread. This makes sure the program won't wait on this
+    # thread to complete before exiting, which is what we want b/c of the
+    # infinite loop in the run method. The downside is that when the Python
+    # interpreter quits, this thread will be interrupted abruptly.
diff --git a/src/server/telemetry/tests/overhead_gauge.py b/src/server/telemetry/tests/overhead_gauge.py
new file mode 100644
index 00000000..1fb71586
--- /dev/null
+++ b/src/server/telemetry/tests/overhead_gauge.py
@@ -0,0 +1,55 @@
+import os
+import resource
+import time
+
+import server.telemetry.monitor as monitor
+
+
+monitoring_on = True
+monitoring_dir = '_monitoring'
+loops = 500
+delay = 0.05
+
+start_time = time.perf_counter()
+
+
+def init():
+    os.makedirs(monitoring_dir, exist_ok=True)
+    monitor.start(monitoring_dir=monitoring_dir,
+                  with_runtime=False,   # time durations only, no GC/OS sampling
+                  with_profiler=False)
+
+
+def do_work():
+    time.sleep(delay)
+
+
+def run_bare():
+    for _ in range(loops):
+        do_work()
+
+
+def run_with_monitoring():
+    for _ in range(loops):
+        sample_id = monitor.start_duration_sample()
+        do_work()
+        monitor.stop_duration_sample('test', sample_id)
+
+
+def print_readings():
+    me = resource.getrusage(resource.RUSAGE_SELF)
+    elapsed = time.perf_counter() - start_time
+
+    print(f"Time (seconds): {elapsed}")
+    print(f"Max RSS (kB on Linux | bytes on MacOS): {me.ru_maxrss}")
+
+
+if __name__ == "__main__":
+    if monitoring_on:
+        init()
+        run_with_monitoring()
+        monitor.stop()
+    else:
+        run_bare()
+
+    print_readings()
diff --git a/src/tests/benchmark/.gitignore b/src/tests/benchmark/.gitignore
new file mode 100644
index 00000000..480ea00f
--- /dev/null
+++ b/src/tests/benchmark/.gitignore
@@ -0,0 +1 @@
+_monitoring
\ No newline at end of file
diff --git a/src/tests/benchmark/README.md b/src/tests/benchmark/README.md
new file mode 100644
index 00000000..e8958453
--- /dev/null
+++ b/src/tests/benchmark/README.md
@@ -0,0 +1,6 @@
+Gauging QuantumLeap Performance
+-------------------------------
+> Examples of collecting and analysing performance metrics.
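+
+A typical workflow, using the scripts and helpers in this directory, goes
+along these lines (a sketch):
+
+    ./baseline-load-test.sh            # run a load test against QuantumLeap
+    python -i analysis.py              # then explore the telemetry it collected
+    >>> print_measurements_summaries(db.duration())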
+ +See: +- https://github.com/smartsdk/ngsi-timeseries-api/wiki/Gauging-Performance diff --git a/src/tests/benchmark/analysis.py b/src/tests/benchmark/analysis.py new file mode 100644 index 00000000..6323bd05 --- /dev/null +++ b/src/tests/benchmark/analysis.py @@ -0,0 +1,80 @@ +import pandas as pd + +from server.telemetry.pandas_import import TelemetryDB, TelemetryFrame, \ + TelemetrySeries + + +def _describe(s: pd.Series): + ps = [0.25, 0.5, 0.75, 0.9, 0.95] + d = s.describe(percentiles=ps) + print(d) + + +def print_series_summary(series: TelemetrySeries, by_pid: bool = False): + if by_pid: + for pid in sorted(series.pids()): + pd_s = series.data(by_pid=pid) + print(f"Time series: {series.label()} | PID {pid}") + _describe(pd_s) + else: + pd_s = series.data() + print(f"Time series: {series.label()}") + _describe(pd_s) + + +def print_measurements_summaries(frame: TelemetryFrame, by_pid: bool = False): + for label in sorted(frame.labels()): + t = frame.time_series(label) + print_series_summary(t, by_pid) + print() + + +def measurements_per_second(series: TelemetrySeries) -> pd.Series: + return series.data().resample('1S').count() + + +def sum_by_second_difference(t1: TelemetrySeries, t2: TelemetrySeries) \ + -> (float, pd.Series): + # NB assumption: for each k . t1[k] <= t2[k] + s1 = t1.data().sum() + s2 = t2.data().sum() + diff_ratio = (s2 - s1) / s2 # ~1% => s1 ~ s2; ~99% => s2 predominant + + x = t1.data().resample('1S').sum() # sum values in each 1-second bucket + y = t2.data().resample('1S').sum() + return diff_ratio, y.sub(x) + + +def sum_by_second_ratio(t1: TelemetrySeries, t2: TelemetrySeries) \ + -> (float, pd.Series): + s1 = t1.data().sum() + s2 = t2.data().sum() + ratio = s1 / s2 + + x = t1.data().resample('1S').sum() # sum values in each 1-second bucket + y = t2.data().resample('1S').sum() + return ratio, x.divide(y) # result[k] = x[k] / y[k] + + +def plot_to_file(figure_name: str, data: pd.Series): + fig = data.plot().get_figure() + fig.savefig(f"{figure_name}.pdf") + + +db = TelemetryDB('_monitoring') + +# print_measurements_summaries(db.duration()) +# print_measurements_summaries(db.duration(), by_pid = True) +# +# print_series_summary(db.max_rss()) +# print_series_summary(db.max_rss(), by_pid = True) +# +# db.duration().labels() +# get_version = db.duration().time_series('/version [GET]') +# rps = measurements_per_second(get_version) +# rps.describe() +# plot_to_file('get-version-rps', rps) +# +# version_fn = db.duration().time_series('version()') +# ratio, diff_series = sum_by_second_difference(version_fn, get_version) +# ratio, r_series = sum_by_second_ratio(db.user_time(), db.system_time()) diff --git a/src/tests/benchmark/asyncio_driver.py b/src/tests/benchmark/asyncio_driver.py new file mode 100644 index 00000000..04853b2b --- /dev/null +++ b/src/tests/benchmark/asyncio_driver.py @@ -0,0 +1,60 @@ +import aiohttp +import asyncio +from typing import Awaitable, Callable + +from tests.benchmark.driver_base import * + + +TestTask = Callable[[aiohttp.ClientSession], Awaitable[int]] + + +async def run_notify_test(session: aiohttp.ClientSession) -> int: + async with session.post(notify_url(), json=notify_entity()) as response: + return response.status + + +async def run_version_test(session: aiohttp.ClientSession) -> int: + async with session.get(version_url()) as response: + return response.status + + +# NOTE. Timing coroutines. +# Starting a timer before `async with` and then stopping it in the block +# won't measure what you think. E.g. 
+# +# sample_id = monitor.start_duration_sample() +# async with session.get(version_url()) as response: +# label = f"client:version:{response.status}" +# monitor.stop_duration_sample(label, sample_id) +# +# won't actually time just how long the HTTP request took from start to +# finish, but will also include the time the various coroutines sat waiting +# in the event loop. While there's no accurate way of timing coroutines that +# I know, in the specific case of aiohttp, we could provide some half-meaningful +# measurements: +# - https://stackoverflow.com/questions/46004745 + + +def lookup_test_task(test_id: str) -> TestTask: + tasks = { + VERSION_TEST: run_version_test, + NOTIFY_TEST: run_notify_test + } + return tasks[test_id] + + +async def do_many(task: TestTask, how_many: int) -> TestRunResults: + async with aiohttp.ClientSession() as session: + tasks = [task(session) for _ in range(how_many)] + return await asyncio.gather(*tasks, return_exceptions=True) + + +class AsyncioDriver(Driver): + + def _do_run(self, test_id: str) -> TestRunResults: + test_task = lookup_test_task(test_id) + return asyncio.run(do_many(test_task, REQUESTS_N)) + + +if __name__ == "__main__": + AsyncioDriver().main() diff --git a/src/tests/benchmark/baseline-load-test.js b/src/tests/benchmark/baseline-load-test.js new file mode 100644 index 00000000..6f94e079 --- /dev/null +++ b/src/tests/benchmark/baseline-load-test.js @@ -0,0 +1,24 @@ +import http from 'k6/http'; +import { check, sleep } from 'k6'; + +export default function() { + var url = 'http://192.0.0.1:8668/version'; + const before = new Date().getTime(); + const T = 30; // time needed to complete a VU iteration + + + for (var i = 0; i < 100; i++){ + let res = http.get(url); + check(res, { 'status was 200': r => r.status == 200 }); + } + const after = new Date().getTime(); + const diff = (after - before) / 1000; + const remainder = T - diff; + if (remainder > 0) { + sleep(remainder); + } else { + console.warn( + `Timer exhausted! The execution time of the test took longer than ${T} seconds` + ); + } +} diff --git a/src/tests/benchmark/baseline-load-test.k6.sh b/src/tests/benchmark/baseline-load-test.k6.sh new file mode 100755 index 00000000..792bdb75 --- /dev/null +++ b/src/tests/benchmark/baseline-load-test.k6.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# +# Before running this test add the time_it decorator to the version endpoint: +# +# from server.telemetry.monitor import time_it +# +# @time_it(label='version()') +# def version(): +# ... +# +# After running this test load analysis.py into the Python interpreter +# to import the telemetry data collected in the _monitoring dir and +# start an interactive data analysis session with Pandas, e.g. +# +# python -i analysis.py +# >>> print_measurements_summaries(db.duration()) +# ... +# +# Have a look at the examples in analysis.py for inspiration... +# + +mkdir -p _monitoring +rm _monitoring/* + +docker build --cache-from smartsdk/quantumleap -t smartsdk/quantumleap ../../../ + +docker-compose up -d + +sleep 10 + +docker run -i --rm loadimpact/k6 run \ + --vus 10 --duration 60s - < baseline-load-test.js + +docker-compose down -v + +echo '>>>' +echo '>>> Duration, GC and OS time series collected in _monitoring dir.' +echo '>>> Run "python -i analysis.py" to explore your data.' 
+echo '>>>' \ No newline at end of file diff --git a/src/tests/benchmark/baseline-load-test.sh b/src/tests/benchmark/baseline-load-test.sh new file mode 100755 index 00000000..7cce3059 --- /dev/null +++ b/src/tests/benchmark/baseline-load-test.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# +# Before running this test add the time_it decorator to the version endpoint: +# +# from server.telemetry.monitor import time_it +# +# @time_it(label='version()') +# def version(): +# ... +# +# After running this test load analysis.py into the Python interpreter +# to import the telemetry data collected in the _monitoring dir and +# start an interactive data analysis session with Pandas, e.g. +# +# python -i analysis.py +# >>> print_measurements_summaries(db.duration()) +# ... +# +# Have a look at the examples in analysis.py for inspiration... +# + +mkdir -p _monitoring +rm _monitoring/* + +docker build --cache-from smartsdk/quantumleap -t smartsdk/quantumleap ../../../ + +docker-compose up -d + +sleep 10 + +# seq 1 10000 | xargs -n1 -P10 curl 'http://localhost:8668/version' +# ^ this is the slowest client, probably because of context switches and +# lack of connection pooling. +# python asyncio_driver.py version +# ^ this is way faster as it does async I/O and pools connections, but +# the one below beats them all! +python threaded_driver.py version + +docker-compose down -v + +echo '>>>' +echo '>>> Duration, GC and OS time series collected in _monitoring dir.' +echo '>>> Run "python -i analysis.py" to explore your data.' +echo '>>>' \ No newline at end of file diff --git a/src/tests/benchmark/docker-compose.yml b/src/tests/benchmark/docker-compose.yml new file mode 100644 index 00000000..0d089ce1 --- /dev/null +++ b/src/tests/benchmark/docker-compose.yml @@ -0,0 +1,60 @@ +version: '3' + +services: + + crate: + image: crate:${CRATE_VERSION:-4.1.4} + command: crate -Cauth.host_based.enabled=false + -Ccluster.name=democluster -Chttp.cors.enabled=true -Chttp.cors.allow-origin="*" + sysctls: + net.core.somaxconn: 4096 + # see: + # - https://serverfault.com/questions/518862/ + # - http://veithen.io/2014/01/01/how-tcp-backlog-works-in-linux.html + ports: + # Admin UI + - "4200:4200" + # Transport protocol + - "4300:4300" + volumes: + - cratedata:/data + + redis: + image: redis + sysctls: + net.core.somaxconn: 4096 + # see: + # - https://serverfault.com/questions/518862/ + # - http://veithen.io/2014/01/01/how-tcp-backlog-works-in-linux.html + ports: + - "6379:6379" + + quantumleap: + image: ${QL_IMAGE:-smartsdk/quantumleap:latest} + command: --config server/gconfig_telemetry.py + sysctls: + net.core.somaxconn: 4096 + # see: + # - https://serverfault.com/questions/518862/ + # - http://veithen.io/2014/01/01/how-tcp-backlog-works-in-linux.html + ports: + - "8668:8668" + depends_on: + - redis + - crate + environment: + - CRATE_HOST=${CRATE_HOST:-crate} + - USE_GEOCODING=False + - REDIS_HOST=redis + - REDIS_PORT=6379 + - LOGLEVEL=INFO + volumes: + - ./_monitoring:/src/ngsi-timeseries-api/src/_monitoring + +volumes: + cratedata: + +networks: + default: + # driver_opts: + # com.docker.network.driver.mtu: ${DOCKER_MTU:-1400} diff --git a/src/tests/benchmark/driver_base.py b/src/tests/benchmark/driver_base.py new file mode 100644 index 00000000..cff51ad0 --- /dev/null +++ b/src/tests/benchmark/driver_base.py @@ -0,0 +1,103 @@ +import sys +from typing import Dict, Iterable, Union + +import server.telemetry.monitor as monitor + +REQUESTS_N = 10000 +QL_BASE_URL = 'http://localhost:8668' +VERSION_TEST = 'version' 
+NOTIFY_TEST = 'notify' + +monitoring_dir = '_monitoring' +# NOTE. Shared monitoring dir. Out of convenience, we use the same dir as +# QuantumLeap so the analysis script can import both client and server +# data in the same Pandas frame. + + +def setup_monitor(): + monitor.start(monitoring_dir=monitoring_dir, + with_runtime=False, # (*) + with_profiler=False) +# NOTE. Shared monitoring dir. Because of that, we can't collect runtime +# metrics since the series would have the same names as the server-side +# series and we wouldn't be able to tell them apart. If we do need client +# runtime stats, we should use a separate monitoring directory. + + +def version_url() -> str: + return f"{QL_BASE_URL}/version" + + +def notify_url() -> str: + return f"{QL_BASE_URL}/v2/notify" + + +def notify_entity() -> dict: + return { + "data": [ + { + "id": "Room:1", + "type": "Room", + "temperature": { + "value": 23.3, + "type": "Number" + }, + "pressure": { + "value": 720, + "type": "Integer" + } + } + ] + } + + +HttpResponseCode = int +Result = Union[HttpResponseCode, Exception] +TestRunResults = Iterable[Result] + + +def responses_by_code_count(rs: TestRunResults) -> Dict[HttpResponseCode, int]: + d = {} + codes = [r for r in rs if isinstance(r, int)] + for k in codes: + cnt = d.get(k, 0) + d[k] = cnt + 1 + return d + + +def print_test_results(rs: TestRunResults): + xs = list(rs) + exs = [x for x in xs if isinstance(x, Exception)] + for e in exs: + print(e) + print(f">>> {len(exs)} exception(s) occurred.") + + print(f">>> HTTP response count by status code:") + for code, count in responses_by_code_count(xs).items(): + print(f"HTTP {code}: {count}") + + +class Driver: + + def _do_run(self, test_id: str) -> TestRunResults: + pass + + def run(self, test_id: str): + setup_monitor() + + sample_id = monitor.start_duration_sample() + rs = self._do_run(test_id) + monitor.stop_duration_sample('client: cumulative time', sample_id) + + monitor.stop() + print_test_results(rs) + + def main(self): + try: + arg = sys.argv[1] + if arg not in (VERSION_TEST, NOTIFY_TEST): + raise IndexError() + self.run(arg) + except IndexError: + raise SystemExit( + f"Usage: {sys.argv[0]} {VERSION_TEST}|{NOTIFY_TEST}") diff --git a/src/tests/benchmark/notify-load-test.json b/src/tests/benchmark/notify-load-test.json new file mode 100644 index 00000000..ae707749 --- /dev/null +++ b/src/tests/benchmark/notify-load-test.json @@ -0,0 +1,16 @@ +{ + "data" : [ + { + "id": "Room:1", + "type": "Room", + "temperature": { + "value": 23.3, + "type": "Number" + }, + "pressure": { + "value": 720, + "type": "Integer" + } + } + ] +} \ No newline at end of file diff --git a/src/tests/benchmark/notify-load-test.sh b/src/tests/benchmark/notify-load-test.sh new file mode 100755 index 00000000..d8d12496 --- /dev/null +++ b/src/tests/benchmark/notify-load-test.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# +# After running this test load analysis.py into the Python interpreter +# to import the telemetry data collected in the _monitoring dir and +# start an interactive data analysis session with Pandas, e.g. +# +# python -i analysis.py +# >>> print_measurements_summaries(db.duration()) +# ... +# +# Have a look at the examples in analysis.py for inspiration... 
+# + +mkdir -p _monitoring +rm _monitoring/* + +docker build --cache-from smartsdk/quantumleap -t smartsdk/quantumleap ../../../ + +docker-compose up -d + +sleep 10 + +# seq 1 10000 | xargs -n1 -P10 curl 'http://localhost:8668/v2/notify' \ +# -X POST -H 'Content-Type: application/json' -d @notify-load-test.json +# ^ this is the slowest client, probably because of context switches and +# lack of connection pooling. +# python asyncio_driver.py notify +# ^ this is way faster as it does async I/O and pools connections, but +# the one below beats them all! +python threaded_driver.py notify + +docker-compose down -v + +echo '>>>' +echo '>>> Duration, GC and OS time series collected in _monitoring dir.' +echo '>>> Run "python -i analysis.py" to explore your data.' +echo '>>>' \ No newline at end of file diff --git a/src/tests/benchmark/threaded_driver.py b/src/tests/benchmark/threaded_driver.py new file mode 100644 index 00000000..08302210 --- /dev/null +++ b/src/tests/benchmark/threaded_driver.py @@ -0,0 +1,72 @@ +from concurrent.futures import ThreadPoolExecutor +import requests +import threading +from typing import Callable + +from tests.benchmark.driver_base import * + + +MAX_THREAD_WORKERS = 10 + + +thread_local = threading.local() + + +def get_session() -> requests.Session: + if not hasattr(thread_local, "session"): + thread_local.session = requests.Session() + return thread_local.session + + +TestTask = Callable[[int], Result] + + +def with_session(test_label: str, + do: Callable[[requests.Session], requests.Response]) -> Result: + try: + sample_id = monitor.start_duration_sample() + with do(get_session()) as response: + label = f"client:{test_label}:{response.status_code}" + monitor.stop_duration_sample(label, sample_id) + return response.status_code + except Exception as e: + return e + # don't time exceptions + + +def run_notify_test(request_number: int) -> Result: + return with_session( + test_label=NOTIFY_TEST, + do=lambda s: s.post(notify_url(), json=notify_entity())) + + +def run_version_test(request_number: int) -> Result: + return with_session( + test_label=VERSION_TEST, + do=lambda s: s.get(version_url())) + +# NOTE. Request number. Not used at the moment, but we could use it in the +# future for request tracing in case we'll ever need more accurate, per-request, +# individual measurements. In that case, both server and client would label +# durations using the request number so then when analysing the data we can +# tell exactly, for each request, how much time was spent where. + + +def lookup_test_task(test_id: str) -> TestTask: + tasks = { + VERSION_TEST: run_version_test, + NOTIFY_TEST: run_notify_test + } + return tasks[test_id] + + +class ThreadedDriver(Driver): + + def _do_run(self, test_id: str) -> TestRunResults: + test_task = lookup_test_task(test_id) + with ThreadPoolExecutor(max_workers=MAX_THREAD_WORKERS) as executor: + return executor.map(test_task, range(REQUESTS_N)) + + +if __name__ == "__main__": + ThreadedDriver().main()